code stringlengths 5 1.03M | repo_name stringlengths 5 90 | path stringlengths 4 158 | license stringclasses 15 values | size int64 5 1.03M | n_ast_errors int64 0 53.9k | ast_max_depth int64 2 4.17k | n_whitespaces int64 0 365k | n_ast_nodes int64 3 317k | n_ast_terminals int64 1 171k | n_ast_nonterminals int64 1 146k | loc int64 -1 37.3k | cycloplexity int64 -1 1.31k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
{-# LANGUAGE GeneralizedNewtypeDeriving
, FlexibleContexts
, FlexibleInstances
, MultiParamTypeClasses
, UndecidableInstances
, RankNTypes
, ScopedTypeVariables
#-}
module Data.BTree.Cache.STM where
import Data.Hashable (Hashable)
import qualified Data.List as L
import qualified Data.Map as M
import qualified Data.Graph as G
import qualified Data.BTree.Cache.Class as C
import qualified Data.BTree.HashTable.STM as H
import qualified Data.Serialize as S
import qualified Data.BTree.KVBackend.Class as KV
import Control.Concurrent.STM
import Control.Concurrent
import Control.Monad
import Control.Monad.Trans
import Control.Monad.Reader
import qualified Data.ByteString as B
import Data.Either
import Data.Maybe
import Data.Time.Clock
import Debug.Trace
import System.IO
import Control.Monad.Error
data State k v = Read !(Maybe v)
| Write !(Maybe k) !(Maybe v)
deriving Eq
instance Show (State k v) where
show (Read a) = "Read: " ++ mtoS a
show (Write k a) = "Write: " ++ mtoS a
mtoS a = if isJust a then "Just" else "Nothing"
type AccessTime = UTCTime
data Exist = Exist | NoExist
deriving (Eq)
data Ref k v = Ref {
refST :: TVar (Either (State k v)
(State k v, Int, State k v)), -- Current state
refExt :: TVar Exist -- Exists in external storage?
}
data Param m k v = Param {
cacheSize :: Int
, table :: H.HashTableSTM k (Ref k (Either B.ByteString v))
, toIO :: forall a. m a -> IO a
, flushQ :: TVar [(k, Ref k v)]
, timestamp :: UTCTime
, genId :: TVar Int -- Generation ID
, genActive :: TVar Int -- Active users of maintained generation
}
trace = id
newtype CacheSTM m k v a = CacheSTM { runCacheSTM :: ReaderT (Param m k v) (ErrorT (IO ()) STM) a }
deriving (Monad, MonadReader (Param m k v), MonadError (IO ()), Functor)
instance Error (IO ())
stm m = lift $ lift m
-- | Run a 'CacheSTM' action to completion.  The STM transaction may
-- abort with an IO action (via 'throwError'), e.g. to fetch a missing
-- key from the external backend; in that case the IO action is
-- performed outside the transaction and the whole transaction is
-- retried from scratch.  Only a 'Right' result terminates the loop.
evalCacheSTM :: Param m k v -> CacheSTM m k v a -> IO a
evalCacheSTM p m = do
  mer <- atomically $ runErrorT $ runReaderT (runCacheSTM m) p
  case mer of
    Left io -> io >> evalCacheSTM p m -- perform IO and retry
    Right a -> return a -- return final result
sizedParam :: Int -> (forall a. m a -> IO a) -> IO (Param m k v)
sizedParam s f = do
ts <- getCurrentTime
ht <- H.newSized s
mv <- atomically $ newTVar []
gn <- atomically $ newTVar 0
ga <- atomically $ newTVar 0
return $ Param s ht f mv ts gn ga
getRef k = do
p <- ask
ht <- asks table
ev <- asks toIO
mr <- stm $ H.lookup ht k
case mr of
Nothing -> fetchRef p ev ht
Just r -> return r
where
fetchRef p ev ht = throwError $ (ev $ KV.fetch k) >>= createRef p ht
createRef p ht bytes = do
atomically $ do
l <- H.lookup ht k
case l of
Just _ -> return ()
Nothing -> do
tvst <- newTVar $ Left $ Read $ Left `fmap` bytes
tvex <- newTVar Exist
let ref = Ref tvst tvex
H.insert ht k ref
newRef t k v = do
ht <- asks table
r@(Ref tv _) <- getRef k
-- maybeQueue False tv (k, r)
update tv $! Write t v
where
update tv x = do
gt <- asks genId
ga <- asks genActive
stm $ do
gid <- readTVar gt -- Generation ID
act <- readTVar ga -- Active users
s <- readTVar tv -- State of ref
case s of
Left o | act == 0 -> writeTVar tv $! Left $! x
| otherwise -> writeTVar tv $! Right $! (x, gid, o)
Right (x', n, o)
| act == 0 -> writeTVar tv $! Left $! x
| gid /= n -> writeTVar tv $! Right $! (x, gid, x')
| otherwise -> writeTVar tv $! Right $! (x, gid, o )
maybeQueue force t x =
if force then update
else do
s <- stm $ readTVar t
case s of
Left (Write _ _) -> return () -- Already in queue
Right (Write _ _, _, _) -> return ()
_ -> update
where
update = do
qt <- asks flushQ
stm $ do q <- readTVar qt
writeTVar qt $! x : q
store t k v = CacheSTM $ newRef t k $! Just $! Right v
-- fetch :: (Eq k, NFData k, Hashable k, KV.KVBackend IO k v) => k -> CacheSTM m k v (Maybe v)
fetch k = CacheSTM $ do
r@(Ref tv _) <- getRef k
s <- stm $ readTVar tv
case s of
Left x -> return $! value x
Right (x, _, _) -> return $! value x
fetchGen n k = CacheSTM $ do
r@(Ref tv _) <- getRef k
s <- stm $ readTVar tv
return $ value $ getGen n s
remove t k = CacheSTM $ newRef t k Nothing
updateTag t k = CacheSTM $ do
ht <- asks table
x <- stm $ H.lookup ht k
case x of
Nothing -> return ()
Just (Ref tv _) -> do
s <- stm $ readTVar tv
case s of
Left (Write _ v) -> stm $ writeTVar tv $! Left $! Write t v
Right (Write _ v, n, o) -> stm $ writeTVar tv $! Right $! (Write t v, n, o)
_ -> return ()
keys = CacheSTM $ do
ht <- asks table
stm $ H.keys ht
debug a = liftIO $ do print a
hFlush stdout
-- | Select the state visible to generation @n@.  A plain 'Left' state
-- is shared by every reader; a 'Right' (newer, gen, older) split
-- exposes the older value to generation @gen@ (the one that was
-- active when the split happened) and the newer value to all others.
getGen _ (Left s) = s
getGen n (Right (newer, gen, older)) =
    if n == gen then older else newer
-- | Mark a completed write as clean: if the stored state still holds
-- exactly the 'Write' payload @x@ that was flushed, replace that
-- 'Write' with the corresponding 'Read'.  If the state changed in the
-- meantime (the @x == x'@ guard fails), the state is left untouched
-- so a newer pending write is not lost.
-- NOTE(review): the first clause produces @Read x@ while the second
-- produces @Read x'@ — the guard makes the two equal, so behaviour
-- matches, but the asymmetry is worth confirming/normalising.
flipWrite x (Left (Write _ x')) | x == x' = Left $! (Read x)
flipWrite x (Right (Write _ x', n, o)) | x == x' = Right $! (Read x', n, o)
flipWrite x (Right (c, n, Write _ x')) | x == x' = Right $! (c, n, Read x')
flipWrite _ s = s
equals a b = value a == value b
value v = case v of
Read x -> either decode id `fmap` x
Write _ x -> either decode id `fmap` x
where
decode = either error id . S.decode
-- | Bracket a computation inside a cache \"generation\".  On entry the
-- active-user count is incremented; the first active user also
-- advances the generation id so concurrent readers can distinguish
-- old from new states (see 'getGen').  The computation receives the
-- generation id it should read with.  On exit the active count is
-- decremented.
-- NOTE(review): if @f@ throws, the decrement never runs and
-- 'genActive' stays elevated forever — a 'bracket'-style cleanup
-- would be safer; confirm callers never throw here.
withGeneration p f = do
  -- start generation
  n <- liftIO $ atomically $ do
    a <- readTVar $ genActive p
    n <- readTVar $ genId p
    writeTVar (genActive p) $! a + 1
    if a > 0 then return n
      else do writeTVar (genId p) $! n + 1
              return $! n + 1
  -- compute
  x <- f n
  -- end generation
  liftIO $ atomically $ do
    a <- readTVar $ genActive p
    writeTVar (genActive p) $! a - 1
  -- yield result
  return x
flush p = do
nowSize <- atomically $ H.size ht
gen <- atomically $ readTVar $ genId p
when (nowSize > maxSize) $ do
flush =<< (atomically $ H.toList ht)
where
ht = table p
maxSize = cacheSize p
flush ks = do
mapM_ (evalCacheSTM p . flushKey ht) ks
flushKey ht (k, r@(Ref tvst tvex)) = CacheSTM $ do
tvga <- asks genActive
tvgi <- asks genId
stm $ do
act <- readTVar tvga
gen <- readTVar tvgi
s <- readTVar tvst
case s of
Left (Read _) ->
H.delete ht k >> return Nothing
Right (Read s, n, _) | act == 0 || n /= gen ->
H.delete ht k >> return Nothing
Right (Write t s, n, _) | act == 0 || n /= gen ->
(writeTVar tvst $! Left $! Write t s) >> return Nothing
Left (Write t (Just (Right v))) ->
(writeTVar tvst $! Left $! Write t $! Just $! Left $! S.encode v)
>> return Nothing
Right (Write t (Just (Right v)), n, o) ->
(writeTVar tvst $! Right $! (Write t $! Just $! Left $! S.encode v, n, o))
>> return Nothing
_ -> return $ Just s
sync p = do
withGeneration p $ \gen -> do
-- sync
-- TODO: use flush queue
ks <- atomically $ H.toList $ table p
ls <- forM ks $ \(k, r@(Ref tv _)) -> do
s <- atomically $ readTVar tv
case getGen gen s of
Write (Just t) _ -> return $! Just $! Left (t, k, r)
Write Nothing _ -> return $! Just $! Right (k, r)
_ -> return $! Nothing
let (lefts, rights) = partitionEithers $ catMaybes ls
mapM_ (evalCacheSTM p . go gen) $ sortByTag lefts
mapM_ (evalCacheSTM p . go gen) $ rights
where
sortByTag :: Ord k => [(k, k, Ref k v)] -> [(k, Ref k v)]
sortByTag ls =
let m = M.fromList $ zip (map (\(_, k, _) -> k) ls) [0..]
(g, f, _) = G.graphFromEdges
[((k, r), i, maybe [] return $ M.lookup t m)
| ((t, k, r), i) <- zip ls [0..]]
in map (\(p, _, _) -> p) $ map f $ G.topSort g
go gen (k, (r@(Ref tv tvex))) = CacheSTM $ do
s <- stm $ readTVar tv
ex <- stm $ readTVar tvex
case getGen gen s of
-- TODO: handle exist
Write _ Nothing | ex == Exist -> throwError write
| otherwise -> stm $ update Nothing
Write _ (Just v) -> throwError write
_ -> return ()
where
write = do
s <- atomically $ readTVar tv
case getGen gen s of
Write _ Nothing -> (toIO p $ KV.remove k ) >> (atomically $ update Nothing)
Write _ (Just v) -> (toIO p $ KV.store k $ either id S.encode $ v)
>> (atomically $ update (Just v))
_ -> return ()
update v = do
s <- readTVar tv
writeTVar tv $! flipWrite v s
liftSTM = CacheSTM . stm
fail = CacheSTM . throwError
instance ( Show k, S.Serialize k, S.Serialize v, Ord k, Eq k, Eq v
, Hashable k, KV.KVBackend m k B.ByteString) =>
C.Cache (CacheSTM m k v) (Param m k v) k v where
store = store
fetch = fetch
remove = remove
sync = sync
eval = evalCacheSTM | brinchj/btree-concurrent | Data/BTree/Cache/STM.hs | lgpl-3.0 | 9,400 | 15 | 21 | 3,230 | 4,001 | 1,980 | 2,021 | 255 | 7 |
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="tr-TR">
<title>TLS Debug | ZAP Uzantısı</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>İçindekiler</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Dizin</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Arama</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favoriler</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset> | secdec/zap-extensions | addOns/tlsdebug/src/main/javahelp/org/zaproxy/zap/extension/tlsdebug/resources/help_tr_TR/helpset_tr_TR.hs | apache-2.0 | 975 | 83 | 52 | 159 | 403 | 212 | 191 | -1 | -1 |
-----------------------------------------------------------------------------
--
-- Module : Manipulation
-- Description :
-- Copyright : (c) Tobias Reinhardt, 2015 <tobioso92_@hotmail.com
-- License : Apache License, Version 2.0
--
-- Maintainer : Tobias Reinhardt <tobioso92_@hotmail.com>
-- Portability : tested only on linux
-- |
--
-----------------------------------------------------------------------------
module Manipulation(
getDefaultSection,
lookupIndex,
removeProperty
)where
import Data.Maybe (fromMaybe)
import Types
-- | Extract the properties of the unnamed (default) section, i.e. the
-- section keyed by the empty string.  Yields @[]@ when no such
-- section exists.
getDefaultSection :: [Section] -> [Property]
getDefaultSection sections =
    case lookup "" sections of
      Just properties -> properties
      Nothing         -> []
-- | Position of the first pair whose key equals the argument, or
-- 'Nothing' when no key matches.
lookupIndex :: Eq a => a -> [(a,b)] -> Maybe Int
lookupIndex = lookupIndex' 0

-- | Worker for 'lookupIndex', counting positions from a given offset.
lookupIndex' :: Eq a => Int -> a -> [(a,b)] -> Maybe Int
lookupIndex' offset target pairs =
    case pairs of
      []              -> Nothing
      (key, _) : rest
        | key == target -> Just offset
        | otherwise     -> lookupIndex' (offset + 1) target rest
-- | Drop the first pair whose key equals the argument; later
-- duplicates and all other pairs are kept in their original order.
removeProperty :: Eq a => a -> [(a, b)] -> [(a, b)]
removeProperty key pairs =
    case break ((== key) . fst) pairs of
      (before, _match : after) -> before ++ after
      (before, [])             -> before
| tobiasreinhardt/show | IniConfiguration/src/Manipulation.hs | apache-2.0 | 1,098 | 8 | 11 | 209 | 340 | 186 | 154 | 20 | 1 |
-- | Settings are centralized, as much as possible, into this file. This
-- includes database connection settings, static file locations, etc.
-- In addition, you can configure a number of different aspects of Yesod
-- by overriding methods in the Yesod typeclass. That instance is
-- declared in the Foundation.hs file.
module Settings
( widgetFile
, PersistConfig
, staticRoot
, staticDir
, Extra (..)
, parseExtra
, cassiusFile
, juliusFile
) where
import Prelude
import Text.Shakespeare.Text (st)
import Language.Haskell.TH.Syntax
import Database.Persist.Postgresql (PostgresConf)
import Yesod.Default.Config
import Yesod.Default.Util
import Data.Text (Text)
import Data.Yaml
import Settings.Development
import Data.Default (def)
import Text.Hamlet
import qualified Text.Cassius as C
import qualified Text.Julius as J
-- | Which Persistent backend this site is using.
type PersistConfig = PostgresConf
-- Static setting below. Changing these requires a recompile
-- | The location of static files on your system. This is a file system
-- path. The default value works properly with your scaffolded site.
staticDir :: FilePath
staticDir = "static"
-- | The base URL for your static files. As you can see by the default
-- value, this can simply be "static" appended to your application root.
-- A powerful optimization can be serving static files from a separate
-- domain name. This allows you to use a web server optimized for static
-- files, more easily set expires and cache values, and avoid possibly
-- costly transference of cookies on static files. For more information,
-- please see:
-- http://code.google.com/speed/page-speed/docs/request.html#ServeFromCookielessDomain
--
-- If you change the resource pattern for StaticR in Foundation.hs, you will
-- have to make a corresponding change here.
--
-- To see how this value is used, see urlRenderOverride in Foundation.hs
staticRoot :: AppConfig DefaultEnv x -> Text
staticRoot conf = [st|#{appRoot conf}/static|]
-- | Settings for 'widgetFile', such as which template languages to support and
-- default Hamlet settings.
widgetFileSettings :: WidgetFileSettings
widgetFileSettings = def
{ wfsHamletSettings = defaultHamletSettings
{ hamletNewlines = AlwaysNewlines
}
}
-- The rest of this file contains settings which rarely need changing by a
-- user.
widgetFile, cassiusFile, juliusFile :: String -> Q Exp
widgetFile = (if development then widgetFileReload
else widgetFileNoReload)
widgetFileSettings
cassiusFile = (if development then C.cassiusFileReload
else C.cassiusFile)
juliusFile = (if development then J.juliusFileReload
else J.juliusFile)
data Extra = Extra
{ extraCopyright :: Text
, extraAnalytics :: Maybe Text -- ^ Google Analytics
} deriving Show
parseExtra :: DefaultEnv -> Object -> Parser Extra
parseExtra _ o = Extra
<$> o .: "copyright"
<*> o .:? "analytics"
| mrkkrp/haskellers | Settings.hs | bsd-2-clause | 3,042 | 0 | 9 | 612 | 368 | 235 | 133 | -1 | -1 |
-- | Parse and encode JSON data in "Overpass API OSM JSON" format
--
-- http://overpass-api.de/output_formats.html#json
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
module OSM.OverpassJSON
where
import Data.Int (Int64)
import Data.Aeson
import Data.Aeson.Types (typeMismatch)
import qualified OSM
import qualified Data.Text as T
import Data.Scientific
import Data.HashMap.Strict as M
instance ToJSON OSM.NodeID where toJSON (OSM.NodeID i) = toJSON i
instance FromJSON OSM.NodeID where parseJSON v = OSM.NodeID <$> parseJSON v
instance ToJSON OSM.WayID where toJSON (OSM.WayID i) = toJSON i
instance FromJSON OSM.WayID where parseJSON v = OSM.WayID <$> parseJSON v
instance ToJSON OSM.RelationID where toJSON (OSM.RelationID i) = toJSON i
instance FromJSON OSM.RelationID where parseJSON v = OSM.RelationID <$> parseJSON v
instance ToJSON OSM.TagKey where toJSON (OSM.TagKey k) = toJSON k
instance FromJSON OSM.TagKey where parseJSON v = OSM.TagKey <$> parseJSON v
instance ToJSONKey OSM.TagKey
instance FromJSONKey OSM.TagKey
instance ToJSON OSM.RelationMember where
toJSON (OSM.RelationMember id (OSM.RelationRole role)) = object
[ "type" .= memberType id
, "id" .= memberID id
, "role" .= role ]
where
memberType :: OSM.ElementID -> T.Text
memberType (OSM.ElementNodeID _) = "node"
memberType (OSM.ElementWayID _) = "way"
memberType (OSM.ElementRelationID _) = "relation"
memberID (OSM.ElementNodeID (OSM.NodeID i)) = i
memberID (OSM.ElementWayID (OSM.WayID i)) = i
memberID (OSM.ElementRelationID (OSM.RelationID i)) = i
instance FromJSON OSM.RelationMember where
parseJSON = withObject "RelationMember" $ \v ->
let mkId =
case M.lookup "type" v of
Just "node" -> Right $ OSM.ElementNodeID . OSM.NodeID
Just "way" -> Right $ OSM.ElementWayID . OSM.WayID
Just "relation" -> Right $ OSM.ElementRelationID . OSM.RelationID
Just a -> Left a
Nothing -> Left Null
in
case mkId of
Right mkId' -> OSM.RelationMember
<$> (mkId' <$> (v .: "id"))
<*> (OSM.RelationRole <$> v .: "role")
Left val -> typeMismatch "RelationMemberType" val
instance ToJSON (OSM.Node ()) where
toJSON n = object
[ "type" .= String "node"
, "id" .= OSM.id n
, "tags" .= OSM.tags n
, "lat" .= OSM.latitude (OSM.coordinates n)
, "lon" .= OSM.longitude (OSM.coordinates n) ]
instance FromJSON (OSM.Node ()) where
parseJSON = withObject "Node" $ \v -> OSM.node
<$> v .: "id"
<*> v .: "tags"
<*> (OSM.Coordinates <$> v .: "lat" <*> v .: "lon")
instance ToJSON (OSM.Way ()) where
toJSON w = object
[ "type" .= String "way"
, "id" .= OSM.id w
, "tags" .= OSM.tags w
, "nodes" .= OSM.nodeIDs w ]
instance FromJSON (OSM.Way ()) where
parseJSON = withObject "Way" $ \v -> OSM.way
<$> v .: "id"
<*> v .: "tags"
<*> v .: "nodes"
instance ToJSON (OSM.Relation ()) where
toJSON r = object
[ "type" .= String "relation"
, "id" .= OSM.id r
, "tags" .= OSM.tags r
, "members" .= OSM.members r ]
| kolen/ptwatch | src/OSM/OverpassJSON.hs | bsd-2-clause | 3,187 | 0 | 18 | 716 | 1,095 | 552 | 543 | 76 | 0 |
{-# LANGUAGE DeriveDataTypeable #-}
module WebToInk.Converter.Exceptions where
import Control.Exception (Exception)
import Data.Typeable
-- | Failures raised by the converter pipeline: the table of contents
-- could not be downloaded, some other download failed, or the
-- external kindlegen tool exited with the given status code.
data ConverterException = TableOfContentsCouldNotBeDownloadedException | UnknownDownloadException | KindlegenException Int
    deriving (Show, Typeable)

-- Default 'Exception' methods suffice; 'Show'/'Typeable' provide the rest.
instance Exception ConverterException
| thlorenz/WebToInk | webtoink-converter/WebToInk/Converter/Exceptions.hs | bsd-2-clause | 336 | 0 | 6 | 39 | 56 | 33 | 23 | 7 | 0 |
{-# LANGUAGE OverloadedStrings #-}
module Web.Slack.IncomingWebhook (
sendMessage
) where
import Control.Monad (void)
import Data.Aeson (encode, toJSON)
import Network (withSocketsDo)
import Network.Wreq
import Web.Slack.IncomingWebhook.Attachment (Attachment)
import Web.Slack.IncomingWebhook.Message (defMessage, Message)
-- | Post a message to a Slack incoming-webhook URL as JSON.
-- The HTTP response is discarded via 'void'.
-- NOTE(review): wreq's 'post' presumably throws an IO exception on a
-- non-2xx status rather than failing silently — confirm that callers
-- expect exceptions instead of a status check here.
sendMessage :: String -> Message -> IO ()
sendMessage url message = void $ post url (toJSON message)
| ryota-ka/yo-slack-adapter | src/Web/Slack/IncomingWebhook.hs | bsd-3-clause | 434 | 0 | 8 | 56 | 119 | 70 | 49 | 11 | 1 |
module SimpleSolve (
solve
) where
import SudokuAbstract
import Data.Maybe
-- |Very basic alghoritm of solving a sudoku.
-- | Very basic backtracking sudoku solver: walk the board square by
-- square via 'nextIndex'; known squares are skipped, unknown squares
-- are tried with each candidate from 'possibilities', recursing and
-- backtracking on failure.  Returns 'Nothing' when no assignment
-- completes the board.
-- NOTE(review): the pattern @(0,9)@ is assumed to be the
-- one-past-the-last-square sentinel produced by 'nextIndex' — confirm
-- against SudokuAbstract.
solve :: (SudokuBoard b, SudokuSquare s, SudokuValue v) => b (s v) -> Maybe (b (s v))
solve board = solve' board (0,0)
  where
    -- Walked past the last square: every square is filled consistently.
    solve' board (0,9) = Just board
    solve' board index = if valKnown board index
                            then solve' board (nextIndex index)
                            else loop (possibilities board index) board index
      where
        -- Try each candidate value in turn; 'Nothing' once all fail.
        loop [] board index = Nothing
        loop (x:xs) board index = if isJust solve
                                    then solve
                                    else loop xs board index
          where
            newBoard = replace board index (known x)
            -- NOTE(review): this local 'solve' shadows the top-level
            -- 'solve'; renaming it would aid readability.
            solve = solve' newBoard (nextIndex index)
| Solpatium/Haskell-Sudoku | src/SimpleSolve.hs | bsd-3-clause | 1,108 | 0 | 13 | 600 | 250 | 129 | 121 | 16 | 5 |
{-# LANGUAGE MultiParamTypeClasses, GeneralizedNewtypeDeriving, ScopedTypeVariables, DeriveDataTypeable, RecordWildCards #-}
-- | Both System.Directory and System.Environment wrappers
module Development.Shake.Directory(
doesFileExist, doesDirectoryExist,
getDirectoryContents, getDirectoryFiles, getDirectoryDirs,
getEnv,
removeFiles, removeFilesAfter,
defaultRuleDirectory
) where
import Control.Exception
import Control.Monad
import Control.Monad.IO.Class
import System.IO.Error
import Data.Binary
import Data.List
import Data.Maybe
import qualified System.Directory as IO
import qualified System.Environment as IO
import Development.Shake.Core
import Development.Shake.Classes
import Development.Shake.FilePath
import Development.Shake.FilePattern
newtype DoesFileExistQ = DoesFileExistQ FilePath
deriving (Typeable,Eq,Hashable,Binary,NFData)
instance Show DoesFileExistQ where
show (DoesFileExistQ a) = "Exists? " ++ a
newtype DoesFileExistA = DoesFileExistA Bool
deriving (Typeable,Eq,Hashable,Binary,NFData)
instance Show DoesFileExistA where
show (DoesFileExistA a) = show a
newtype DoesDirectoryExistQ = DoesDirectoryExistQ FilePath
deriving (Typeable,Eq,Hashable,Binary,NFData)
instance Show DoesDirectoryExistQ where
show (DoesDirectoryExistQ a) = "Exists dir? " ++ a
newtype DoesDirectoryExistA = DoesDirectoryExistA Bool
deriving (Typeable,Eq,Hashable,Binary,NFData)
instance Show DoesDirectoryExistA where
show (DoesDirectoryExistA a) = show a
newtype GetEnvQ = GetEnvQ String
deriving (Typeable,Eq,Hashable,Binary,NFData)
instance Show GetEnvQ where
show (GetEnvQ a) = "getEnv " ++ a
newtype GetEnvA = GetEnvA (Maybe String)
deriving (Typeable,Eq,Hashable,Binary,NFData)
instance Show GetEnvA where
show (GetEnvA a) = fromMaybe "<unset>" a
data GetDirectoryQ
= GetDir {dir :: FilePath}
| GetDirFiles {dir :: FilePath, pat :: [FilePattern]}
| GetDirDirs {dir :: FilePath}
deriving (Typeable,Eq)
newtype GetDirectoryA = GetDirectoryA [FilePath]
deriving (Typeable,Show,Eq,Hashable,Binary,NFData)
instance Show GetDirectoryQ where
show (GetDir x) = "Listing " ++ x
show (GetDirFiles a b) = "Files " ++ a </> ['{'|m] ++ unwords b ++ ['}'|m]
where m = length b > 1
show (GetDirDirs x) = "Dirs " ++ x
instance NFData GetDirectoryQ where
rnf (GetDir a) = rnf a
rnf (GetDirFiles a b) = rnf a `seq` rnf b
rnf (GetDirDirs a) = rnf a
instance Hashable GetDirectoryQ where
hashWithSalt salt = hashWithSalt salt . f
where f (GetDir x) = (0 :: Int, x, [])
f (GetDirFiles x y) = (1, x, y)
f (GetDirDirs x) = (2, x, [])
instance Binary GetDirectoryQ where
get = do
i <- getWord8
case i of
0 -> liftM GetDir get
1 -> liftM2 GetDirFiles get get
2 -> liftM GetDirDirs get
put (GetDir x) = putWord8 0 >> put x
put (GetDirFiles x y) = putWord8 1 >> put x >> put y
put (GetDirDirs x) = putWord8 2 >> put x
instance Rule DoesFileExistQ DoesFileExistA where
storedValue (DoesFileExistQ x) = fmap (Just . DoesFileExistA) $ IO.doesFileExist x
-- invariant _ = True
instance Rule DoesDirectoryExistQ DoesDirectoryExistA where
storedValue (DoesDirectoryExistQ x) = fmap (Just . DoesDirectoryExistA) $ IO.doesDirectoryExist x
-- invariant _ = True
instance Rule GetEnvQ GetEnvA where
storedValue (GetEnvQ x) = fmap (Just . GetEnvA) $ getEnvIO x
-- invariant _ = True
instance Rule GetDirectoryQ GetDirectoryA where
storedValue x = fmap Just $ getDir x
-- invariant _ = True
-- | This function is not actually exported, but Haddock is buggy. Please ignore.
defaultRuleDirectory :: Rules ()
defaultRuleDirectory = do
defaultRule $ \(DoesFileExistQ x) -> Just $
liftIO $ fmap DoesFileExistA $ IO.doesFileExist x
defaultRule $ \(DoesDirectoryExistQ x) -> Just $
liftIO $ fmap DoesDirectoryExistA $ IO.doesDirectoryExist x
defaultRule $ \(x :: GetDirectoryQ) -> Just $
liftIO $ getDir x
defaultRule $ \(GetEnvQ x) -> Just $
liftIO $ fmap GetEnvA $ getEnvIO x
-- | Returns 'True' if the file exists.
doesFileExist :: FilePath -> Action Bool
doesFileExist file = do
DoesFileExistA res <- apply1 $ DoesFileExistQ file
return res
-- | Returns 'True' if the directory exists.
doesDirectoryExist :: FilePath -> Action Bool
doesDirectoryExist file = do
DoesDirectoryExistA res <- apply1 $ DoesDirectoryExistQ file
return res
-- | Return 'Just' the value of the environment variable, or 'Nothing'
-- if the variable is not set.
getEnv :: String -> Action (Maybe String)
getEnv var = do
GetEnvA res <- apply1 $ GetEnvQ var
return res
getEnvIO :: String -> IO (Maybe String)
getEnvIO x = Control.Exception.catch (fmap Just $ IO.getEnv x) $
\e -> if isDoesNotExistError e then return Nothing else ioError e
-- | Get the contents of a directory. The result will be sorted, and will not contain
-- the entries @.@ or @..@ (unlike the standard Haskell version). The resulting paths will be relative
-- to the first argument.
--
-- It is usually simpler to call either 'getDirectoryFiles' or 'getDirectoryDirs'.
getDirectoryContents :: FilePath -> Action [FilePath]
getDirectoryContents x = getDirAction $ GetDir x
-- | Get the files anywhere under a directory that match any of a set of patterns.
-- For the interpretation of the patterns see '?=='. All results will be
-- relative to the 'FilePath' argument. Some examples:
--
-- > getDirectoryFiles "Config" ["//*.xml"]
-- > -- All .xml files anywhere under the Config directory
-- > -- If Config/foo/bar.xml exists it will return ["foo/bar.xml"]
-- > getDirectoryFiles "Modules" ["*.hs","*.lhs"]
-- > -- All .hs or .lhs in the Modules directory
-- > -- If Modules/foo.hs and Modules/foo.lhs exist, it will return ["foo.hs","foo.lhs"]
--
-- If you require a qualified file name it is often easier to use @\"\"@ as 'FilePath' argument,
-- for example the following two expressions are equivalent:
--
-- > fmap (map ("Config" </>)) (getDirectoryFiles "Config" ["//*.xml"])
-- > getDirectoryFiles "" ["Config//*.xml"]
getDirectoryFiles :: FilePath -> [FilePattern] -> Action [FilePath]
getDirectoryFiles x f = getDirAction $ GetDirFiles x f
-- | Get the directories in a directory, not including @.@ or @..@.
-- All directories are relative to the argument directory.
--
-- > getDirectoryDirs "/Users"
-- > -- Return all directories in the /Users directory
-- > -- e.g. ["Emily","Henry","Neil"]
getDirectoryDirs :: FilePath -> Action [FilePath]
getDirectoryDirs x = getDirAction $ GetDirDirs x
getDirAction x = do GetDirectoryA y <- apply1 x; return y
contents :: FilePath -> IO [FilePath]
-- getDirectoryContents "" is equivalent to getDirectoryContents "." on Windows,
-- but raises an error on Linux. We smooth out the difference.
contents x = fmap (filter $ not . all (== '.')) $ IO.getDirectoryContents $ if x == "" then "." else x
answer :: [FilePath] -> GetDirectoryA
answer = GetDirectoryA . sort
getDir :: GetDirectoryQ -> IO GetDirectoryA
getDir GetDir{..} = fmap answer $ contents dir
getDir GetDirDirs{..} = fmap answer $ filterM f =<< contents dir
where f x = IO.doesDirectoryExist $ dir </> x
getDir GetDirFiles{..} = fmap answer $ concatMapM f $ directories pat
where
test = let ps = map (?==) pat in \x -> any ($ x) ps
f (dir2,False) = do
xs <- fmap (map (dir2 </>)) $ contents $ dir </> dir2
flip filterM xs $ \x -> if not $ test x then return False else fmap not $ IO.doesDirectoryExist $ dir </> x
f (dir2,True) = do
xs <- fmap (map (dir2 </>)) $ contents $ dir </> dir2
(dirs,files) <- partitionM (\x -> IO.doesDirectoryExist $ dir </> x) xs
rest <- concatMapM (\d -> f (d, True)) dirs
return $ filter test files ++ rest
-- | Monadic 'concatMap': apply the action to every element and
-- concatenate the resulting lists.
concatMapM f xs = fmap concat (mapM f xs)

-- | Monadic 'partition': split a list with a monadic predicate,
-- keeping matches on the left and the rest on the right, preserving
-- the original order on both sides.
partitionM _ [] = return ([], [])
partitionM p (x:xs) = do
    keep <- p x
    (yes, no) <- partitionM p xs
    return $ if keep then (x : yes, no) else (yes, x : no)
-- | Remove all empty directories and files that match any of the patterns beneath a directory.
-- Some examples:
--
-- @
-- 'removeFiles' \"output\" [\"\/\/*\"]
-- 'removeFiles' \".\" [\"\/\/*.hi\",\"\/\/*.o\"]
-- @
--
-- This function is often useful when writing a @clean@ action for your build system,
-- often as a 'phony' rule.
removeFiles :: FilePath -> [FilePattern] -> IO ()
removeFiles dir ["//*"] = IO.removeDirectoryRecursive dir -- optimisation
removeFiles dir pat = f "" >> return ()
where
test = let ps = map (?==) pat in \x -> any ($ x) ps
-- dir </> dir2 is the part to operate on, return True if you cleaned everything
f :: FilePath -> IO Bool
f dir2 = do
xs <- fmap (map (dir2 </>)) $ contents $ dir </> dir2
(dirs,files) <- partitionM (\x -> IO.doesDirectoryExist $ dir </> x) xs
noDirs <- fmap and $ mapM f dirs
let (del,keep) = partition test files
mapM_ IO.removeFile $ map (dir </>) del
let die = noDirs && null keep
when die $ IO.removeDirectory $ dir </> dir2
return die
-- | Remove files, like 'removeFiles', but executed after the build completes successfully.
-- Useful for implementing @clean@ actions that delete files Shake may have open for building.
removeFilesAfter :: FilePath -> [FilePattern] -> Action ()
removeFilesAfter a b = runAfter $ removeFiles a b
| nh2/shake | Development/Shake/Directory.hs | bsd-3-clause | 9,619 | 0 | 16 | 2,090 | 2,609 | 1,351 | 1,258 | 155 | 3 |
module MainSpec where
import Test.Hspec
spec :: Spec
spec = do return ()
-- describe "main" $ do
-- it "works" $ do
-- 1 `shouldBe` 1
| mg50/hypnerotomachia | test/MainSpec.hs | bsd-3-clause | 154 | 0 | 8 | 47 | 30 | 18 | 12 | 4 | 1 |
{-# OPTIONS_GHC -fno-warn-missing-signatures -fno-warn-type-defaults #-}
module Colors (
red, green, yellow, blue,
magenta, cyan, white2, white, black,
bright, dim, underline, blink, Colors.reverse, hidden
) where
-- | Wrap a string in an ANSI SGR escape sequence:
-- @ESC[\<code\>m … ESC[0m@ (the trailing @0m@ resets attributes).
sgr :: String -> String -> String
sgr code s = "\ESC[" ++ code ++ "m" ++ s ++ "\ESC[0m"

-- Text attributes.
bright    = sgr "1"
dim       = sgr "2"
underline = sgr "4"
blink     = sgr "5"
reverse   = sgr "7"
hidden    = sgr "8"

-- Foreground colours.
black   = sgr "30"
red     = sgr "31"
green   = sgr "32"
yellow  = sgr "33"
blue    = sgr "34"
magenta = sgr "35" --AKA purple
cyan    = sgr "36" --AKA aqua
white   = sgr "37"
white2  = sgr "38"
| blockapps/mgit | src/Colors.hs | bsd-3-clause | 991 | 0 | 6 | 167 | 296 | 158 | 138 | 20 | 1 |
{-# OPTIONS -cpp #-}
-- OPTIONS required for ghc-6.4.x compat, and must appear first
{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -cpp #-}
{-# OPTIONS_NHC98 -cpp #-}
{-# OPTIONS_JHC -fcpp #-}
-- #hide
module Distribution.Compat.CopyFile (
copyFile,
copyOrdinaryFile,
copyExecutableFile,
setFileOrdinary,
setFileExecutable,
) where
#ifdef __GLASGOW_HASKELL__
import Control.Monad
( when )
import Control.Exception
( bracket, bracketOnError )
import Distribution.Compat.Exception
( catchIO )
#if __GLASGOW_HASKELL__ >= 608
import Distribution.Compat.Exception
( throwIOIO )
import System.IO.Error
( ioeSetLocation )
#endif
import System.Directory
( renameFile, removeFile )
import Distribution.Compat.TempFile
( openBinaryTempFile )
import System.FilePath
( takeDirectory )
import System.IO
( openBinaryFile, IOMode(ReadMode), hClose, hGetBuf, hPutBuf )
import Foreign
( allocaBytes )
#endif /* __GLASGOW_HASKELL__ */
#ifndef mingw32_HOST_OS
import System.Posix.Types
( FileMode )
import System.Posix.Internals
( c_chmod )
import Foreign.C
( withCString )
#if __GLASGOW_HASKELL__ >= 608
import Foreign.C
( throwErrnoPathIfMinus1_ )
#else
import Foreign.C
( throwErrnoIfMinus1_ )
#endif
#endif /* mingw32_HOST_OS */
copyOrdinaryFile, copyExecutableFile :: FilePath -> FilePath -> IO ()
copyOrdinaryFile src dest = copyFile src dest >> setFileOrdinary dest
copyExecutableFile src dest = copyFile src dest >> setFileExecutable dest
setFileOrdinary, setFileExecutable :: FilePath -> IO ()
#ifndef mingw32_HOST_OS
setFileOrdinary path = setFileMode path 0o644 -- file perms -rw-r--r--
setFileExecutable path = setFileMode path 0o755 -- file perms -rwxr-xr-x
setFileMode :: FilePath -> FileMode -> IO ()
setFileMode name m =
withCString name $ \s -> do
#if __GLASGOW_HASKELL__ >= 608
throwErrnoPathIfMinus1_ "setFileMode" name (c_chmod s m)
#else
throwErrnoIfMinus1_ name (c_chmod s m)
#endif
#else
setFileOrdinary _ = return ()
setFileExecutable _ = return ()
#endif
copyFile :: FilePath -> FilePath -> IO ()
#ifdef __GLASGOW_HASKELL__
copyFile fromFPath toFPath =
copy
#if __GLASGOW_HASKELL__ >= 608
`catchIO` (\ioe -> throwIOIO (ioeSetLocation ioe "copyFile"))
#endif
where copy = bracket (openBinaryFile fromFPath ReadMode) hClose $ \hFrom ->
bracketOnError openTmp cleanTmp $ \(tmpFPath, hTmp) ->
do allocaBytes bufferSize $ copyContents hFrom hTmp
hClose hTmp
renameFile tmpFPath toFPath
openTmp = openBinaryTempFile (takeDirectory toFPath) ".copyFile.tmp"
cleanTmp (tmpFPath, hTmp) = do
hClose hTmp `catchIO` \_ -> return ()
removeFile tmpFPath `catchIO` \_ -> return ()
bufferSize = 4096
copyContents hFrom hTo buffer = do
count <- hGetBuf hFrom buffer bufferSize
when (count > 0) $ do
hPutBuf hTo buffer count
copyContents hFrom hTo buffer
#else
copyFile fromFPath toFPath = readFile fromFPath >>= writeFile toFPath
#endif
| dcreager/cabal | Distribution/Compat/CopyFile.hs | bsd-3-clause | 3,255 | 0 | 14 | 812 | 657 | 364 | 293 | 31 | 1 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE MultiWayIf #-}
-- todaybot
-- ben clifford benc@hawaga.org.uk
-- used https://www.fpcomplete.com/school/to-infinity-and-beyond/competition-winners/interfacing-with-restful-json-apis
-- as a tutorial
import Prelude hiding (mapM_)
import Control.Applicative ( many )
import Control.Concurrent (threadDelay)
import Control.Exception (catch, SomeException (..) )
import Control.Lens
import Control.Monad hiding (mapM_)
import Data.Maybe (fromMaybe)
import Data.Time.Calendar
import Data.Time.Clock
import Data.Time.LocalTime
import Data.Foldable (mapM_)
import Data.Aeson ( Value(..) )
import Data.Aeson.Lens (key, _Bool, _String, _Array)
import Data.Yaml (decodeFile)
import System.IO (hPutStrLn, stderr)
import Network.Wreq (auth,
basicAuth,
defaults,
getWith,
header,
param,
postWith,
responseBody,
Part )
import Data.Monoid ( (<>) )
import qualified Control.Lens.Getter as Getter
import qualified Text.Parsec as P
import qualified Data.Vector as V
import qualified Data.Text as T
import qualified Data.Text.IO as T
import qualified Data.Text.Encoding as TE
import qualified Data.ByteString as BSS
import qualified Data.ByteString.Char8 as BSS8
-- | Bot configuration loaded from @secrets.yaml@: the reddit account
-- credentials plus the OAuth2 app id/secret.
data Configuration = Configuration {
  username :: T.Text,
  password :: T.Text,
  app_id :: BSS.ByteString,
  app_secret :: BSS.ByteString
}
-- | An OAuth2 bearer token as returned by reddit's token endpoint.
type BearerToken = T.Text
-- | Entry point: load configuration once, then run 'mainLoop' forever,
-- sleeping 13 minutes between passes.  'skipExceptions' keeps a failed
-- pass from killing the bot.
main = do
  progress "todaybot"
  configuration <- readConfiguration
  forever $ do
    skipExceptions $ mainLoop configuration
    sleep 13
-- | One bot pass: authenticate, fetch the hot posts, and process each
-- one (a failure on one post does not abort the rest of the pass).
mainLoop configuration = do
  bearerToken <- authenticate configuration
  posts <- getHotPosts bearerToken
  mapM_ (skipExceptions . (processPost bearerToken)) posts
  progress "Pass completed."
-- | Run an action, logging (rather than propagating) any exception.
-- NOTE(review): this catches 'SomeException', so it also swallows
-- asynchronous exceptions such as UserInterrupt — confirm intended.
skipExceptions a = a `catch` \(e :: SomeException) -> progress $ "Exception: " <> (show e)
-- | User-Agent header required by the reddit API guidelines.
userAgentHeader = header "User-Agent" .~ ["lsc-todaybot by u/benclifford"]
-- | OAuth2 @Authorization: bearer <token>@ header.
authorizationHeader bearerToken = header "Authorization" .~ ["bearer " <> (TE.encodeUtf8 bearerToken)]
-- | Obtain a bearer token via reddit's password grant: HTTP basic
-- auth with the app id/secret, account credentials as query params.
-- Extracts the "access_token" field from the JSON response.
authenticate :: Configuration -> IO BearerToken
authenticate configuration = do
  progress "Authenticating"
  let opts = defaults
           & userAgentHeader
           & param "grant_type" .~ ["password"]
           & param "username" .~ [username configuration]
           & param "password" .~ [password configuration]
           & auth ?~ basicAuth (app_id configuration) (app_secret configuration)
  resp <- postWith opts ("https://www.reddit.com/api/v1/access_token") ([] :: [Part])
  return $ resp ^. responseBody . key "access_token" . _String
-- | The subreddit "hot" listing endpoint (capped at 100 posts).
hotPostsUrl = "https://oauth.reddit.com/r/LondonSocialClub/hot?limit=100"
-- | Fetch the hot posts as raw JSON values (the listing's
-- data.children array).
getHotPosts :: BearerToken -> IO (V.Vector Value)
getHotPosts bearerToken = do
  progress "Getting hot posts"
  let opts = defaults
           & authorizationHeader bearerToken
           & userAgentHeader
  resp <- getWith opts hotPostsUrl
  return $ resp ^. responseBody . key "data" . key "children" . _Array
-- | Load 'Configuration' from @secrets.yaml@ in the working
-- directory; dies with 'error' if the file cannot be parsed.
readConfiguration = do
  configYaml :: Value <- fromMaybe (error "Cannot parse config file") <$> decodeFile "secrets.yaml"
  return $ Configuration {
    username = configYaml ^. key "username" . _String,
    password = configYaml ^. key "password" . _String,
    app_id = configYaml ^. key "app_id" . _ByteString,
    app_secret = configYaml ^. key "app_secret" . _ByteString
  }
-- | Getter from a JSON string to a ByteString (via String; assumes
-- the text is representable as 8-bit chars — it is only used for the
-- app id/secret here).
_ByteString = _String . Getter.to (T.unpack) . Getter.to (BSS8.pack)
-- | Inspect one post and update its flair as needed:
--   * only posts whose flair is empty or already "Today" are touched;
--   * a date parsed from the title drives the Today/Archived cycle
--     (stickied posts are never given "Archived" flair);
--   * titles starting with "[Interest" get "Interest Check" flair.
processPost bearerToken post = do
  let kind = post ^. postKind
  let i = post ^. postId
  let fullname = kind <> "_" <> i    -- reddit "fullname", e.g. t3_abc123
  let flair_text = post ^. postFlairText
  let flair_css = post ^. postFlairCss
  let title = post ^. postTitle
  let stickied = fromMaybe False $ post ^? key "data" . key "stickied" . _Bool
  T.putStr $ fullname <> ": " <> title <> " [" <> flair_text <> "/" <> flair_css <> "]"
  when stickied $ T.putStr " [Stickied]"
  T.putStrLn ""
  -- if flair has been modified (other than to Today) then
  -- stay away...
  let changeableFlair = flair_text == "Today" || flair_text == ""
  progress $ "  Changeable flair? " <> (show changeableFlair)
  when changeableFlair $ do
    -- today?
    -- can we parse the date out of the subject line?
    -- let's use parsec...
    let parsedDate = P.parse datedSubjectLine "Post title" title
    case parsedDate of
      Right postDate -> do
        progress $ "    Post date is " <> (show postDate)
        now <- localDay <$> getCurrentLocalTime
        -- posts move through a sequence of no flair, then today,
        -- then archived, except we do not archive stickied posts
        -- because that looks weird with a greyed out post being stickied.
        -- I'm unsure if the right thing to do is unsticky and archive or
        -- to leave stickied, with the today flair substituted back to
        -- nothing - then if someone unstickies, it will get archived flair
        -- in a future run.
        if | postDate > now -> progress $ "    Skipping: Post is in future"
           | postDate == now -> forceFlair bearerToken post "Today" "today"
           | postDate < now && not stickied -> forceFlair bearerToken post "Archived" "archived"
           | postDate < now && stickied -> forceFlair bearerToken post "" ""
      Left e -> progress $ "    Skipping: Date did not parse: " <> (show e)
    let interestCheck = (T.toCaseFold "[Interest") `T.isPrefixOf` (T.toCaseFold title)
    progress $ "  Interest check? " <> (show interestCheck)
    when interestCheck $ forceFlair bearerToken post "Interest Check" "interestcheck"
  -- because we love the royal george
  {-
    when (   kind == "t3"
          && "Royal George" `T.isInfixOf` title ) $ do
      putStrLn "Royal george matched!"
      forceFlair bearerToken post "ROYAL GEORGE" ""
  -}
-- Parser for subject-line dates.  The automod config matches titles
-- against (regex):
--   "\\[([0-9]{1,2}[/.-][0-9]{1,2}[/.-]([0-9]{2}|[0-9]{4})|interest( check)?)\\].*"
-- so we skip everything before the first '[' and then parse a
-- bracketed date with 'dateBlock'.
datedSubjectLine = many (P.noneOf "[") *> dateBlock
-- | Parse a @[day/month/year]@ block; separators may be any of
-- "/-.\\" and the year may be written with 2 or 4 digits (widened by
-- 'normaliseYear' inside 'yearComponent').
dateBlock = do
  P.char '['
  day <- dateComponent
  dateSeparator
  month <- dateComponent
  dateSeparator
  year <- yearComponent
  P.char ']'
  return $ fromGregorian year month day
-- | Any single separator character allowed between date components.
dateSeparator = P.oneOf "/-.\\"
-- | A day or month number.
dateComponent = read <$> digits
-- | A year, widened to four digits by 'normaliseYear'.
yearComponent = (normaliseYear . read) <$> digits
-- | A run of digit characters.  NOTE(review): 'P.many' accepts zero
-- digits, in which case 'read' on "" would throw — the surrounding
-- grammar appears to make that unreachable, but confirm.
digits = (P.many $ P.oneOf "0123456789")
-- | Widen a parsed year to a full year number: two-digit years are
-- shorthand for 20xx, while years already written with three or more
-- digits are taken literally.  Negative years are rejected.
--
-- BUG FIX: the previous guard was @year > 2000@, so an explicit year
-- of exactly 2000 (or any 3-4 digit year up to 2000) fell through to
-- 'error' even though the title regex allows 4-digit years.
normaliseYear year
  | year >= 100 = year            -- explicit (3+ digit) year, use as-is
  | year >= 0 = 2000 + year       -- two-digit shorthand; hello, 2100!
  | otherwise = error $ "Cannot normalise year " ++ show year
-- | The current wall-clock time in this machine's local time zone.
getCurrentLocalTime = do
  tz <- getCurrentTimeZone
  utcNow <- getCurrentTime
  return (utcToLocalTime tz utcNow)
-- Lens-style accessors into the JSON of a reddit "link" object.
postKind = key "kind" . _String
postId = key "data" . key "id" . _String
postFlairText = key "data" . key "link_flair_text" . _String
postFlairCss = key "data" . key "link_flair_css_class" . _String
postTitle = key "data" . key "title" . _String
-- | Set a post's flair text/css via the mod flair endpoint, skipping
-- the API call when the post already carries exactly that flair.
forceFlair bearerToken post forced_flair forced_flair_css = do
  let kind = post ^. postKind
  let i = post ^. postId
  let fullname = kind <> "_" <> i    -- reddit "fullname" of the post
  T.putStrLn $ "    Setting flair for " <> fullname <> " to " <> forced_flair <> " if necessary"
  let flair_text = post ^. postFlairText
  let flair_css = post ^. postFlairCss
  if flair_text == forced_flair && flair_css == forced_flair_css
    then progress "      No flair change necessary"
    else do progress "      Updating flair"
            let opts = defaults
                     & authorizationHeader bearerToken
                     & param "api_type" .~ ["json"]
                     & param "link" .~ [fullname]
                     & param "text" .~ [forced_flair]
                     & param "css_class" .~ [forced_flair_css]
            postWith opts "https://oauth.reddit.com/r/LondonSocialClub/api/flair" ([] :: [Part])
            -- TODO check if successful
            return ()
-- | Log a progress/diagnostic message to stderr.
progress s = hPutStrLn stderr s
-- | sleeps for specified number of minutes
sleep mins = threadDelay (mins * 60 * 1000000)
| benclifford/lsc-todaybot | Main.hs | bsd-3-clause | 8,115 | 0 | 22 | 1,893 | 2,034 | 1,034 | 1,000 | -1 | -1 |
module Code04 where
import Data.Array
-- Sample data
-- Sample data generators.
-- | The first @n@ doubles of the naturals not divisible by 3
-- (2,4,8,10,14,...), strictly increasing.
sX,sY :: Int -> [Int]
sX n = take n (map (2 *) (filter (\x -> x `mod` 3 /= 0) [0..]))
-- | The first @n@ multiples of 3 (0,3,6,...), strictly increasing.
sY n = take n (map (3 *) [0..])
-- | Array-backed versions of the samples.  Note the bounds are
-- @(0,size)@ — one slot more than the @size@ supplied elements.
aX,aY :: Int -> Array Int Int
aX size = listArray (0, size) (sX size)
aY size = listArray (0, size) (sY size)
-- Pre-built sample inputs of increasing size (powers of ten), handy
-- for interactive benchmarking; naming scheme: <series><size>.
sX10 = sX 10
sX100 = sX 100
sX1000 = sX 1000
sX10000 = sX 10000
sX100000 = sX 100000
sX1000000 = sX 1000000
sX10000000 = sX 10000000
sY10 = sY 10
sY100 = sY 100
sY1000 = sY 1000
sY10000 = sY 10000
sY100000 = sY 100000
sY1000000 = sY 1000000
sY10000000 = sY 10000000
aX10 = aX 10
aX100 = aX 100
aX1000 = aX 1000
aX10000 = aX 10000
aX100000 = aX 100000
aX1000000 = aX 1000000
aX10000000 = aX 10000000
aY10 = aY 10
aY100 = aY 100
aY1000 = aY 1000
aY10000 = aY 10000
aY100000 = aY 100000
aY1000000 = aY 1000000
aY10000000 = aY 10000000
-- Specification
-- | Specification: the k-th smallest element (0-based) of the union
-- of two sorted lists, obtained by merging and then indexing.
smallest0 :: Ord a => Int -> ([a],[a]) -> a
smallest0 k lists = union lists !! k

-- | Merge two strictly increasing lists with no common elements.
-- NOTE: deliberately partial — if the two heads are ever equal,
-- neither guard matches (the inputs are assumed disjoint).
union :: Ord a => ([a],[a]) -> [a]
union (left, []) = left
union ([], right) = right
union (l:ls, r:rs)
  | l < r = l : union (ls, r:rs)
  | l > r = r : union (l:ls, rs)
-- Implementation
-- | Divide-and-conquer k-th smallest of the union of two sorted,
-- disjoint lists — same contract as 'smallest0', but it discards half
-- of one list per step instead of performing a full merge.
smallest :: Ord a => Int -> ([a],[a]) -> a
smallest k ([], ws) = ws !! k
smallest k (zs, []) = zs !! k
smallest k (zs, ws)
  | midZ < midW = if keepLow then smallest k (zs, lowW)
                             else smallest (k - p - 1) (highZ, ws)
  | otherwise   = if keepLow then smallest k (lowZ, ws)
                             else smallest (k - q - 1) (zs, highW)
  where
    -- does the answer lie in the low halves?
    keepLow = k <= p + q
    p = length zs `div` 2
    q = length ws `div` 2
    (lowZ, midZ:highZ) = splitAt p zs
    (lowW, midW:highW) = splitAt q ws
-- Implementation using Array
-- | Array variant of 'smallest': binary search over half-open index
-- windows [lx,rx) and [ly,ry) instead of repeatedly splitting lists.
smallestA :: Ord a => Int -> (Array Int a,Array Int a) -> a
smallestA k (xa,ya) = search k (0,m+1) (0,n+1)
  where
    -- NOTE(review): assumes both arrays are 0-based; with arrays
    -- built by 'aX'/'aY' (bounds (0,n) holding only n elements) the
    -- last slot is undefined — confirm the intended size convention.
    (0,m) = bounds xa
    (0,n) = bounds ya
    search k (lx,rx) (ly,ry)
      | lx == rx = ya ! (k+ly)      -- left window empty: index right
      | ly == ry = xa ! (k+lx)      -- right window empty: index left
      | otherwise = case (xa ! mx < ya ! my, k <= mx-lx + my-ly) of
          (True,True) -> search k (lx,rx) (ly,my)
          (True,False) -> search (k-(mx-lx)-1) (mx+1,rx) (ly,ry)
          (False,True) -> search k (lx,mx) (ly,ry)
          (False,False) -> search (k-(my-ly)-1) (lx,rx) (my+1,ry)
        where
          mx = (lx+rx) `div` 2
          my = (ly+ry) `div` 2
| sampou-org/pfad | Code/Code04.hs | bsd-3-clause | 2,389 | 0 | 16 | 798 | 1,329 | 720 | 609 | 70 | 4 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE Rank2Types #-}
module Milib.Contest
( contestMain
, hContestMain
, hContestMainN
, parserWithoutError
, parsec
, gcjParsec
, gcjPrinter
, gcjPrinterLn
) where
import Control.Applicative
import Control.Monad
import qualified Data.ByteString.Lazy.Char8 as BS
import Data.Functor.Identity
import System.IO
import qualified Text.Parsec.ByteString.Lazy
import Text.Parsec.Error (ParseError)
import Text.Parsec.Prim (runParser, Parsec, Stream, (<?>), uncons)
import Text.Printf (hPrintf)
import Milib.IO
-- | Writes a solved value @b@ to the given output handle.
type Printer b = Handle -> b -> IO ()
-- | Pure solver from parsed input to answer.
type Solver a b = a -> b
-- | Reads and parses input from a handle, possibly failing with @err@.
type Parser err a = Handle -> IO (Either err a)
-- | A parsec parser over lazy ByteStrings.
type ParsecParser a = Stream BS.ByteString Identity Char => Parsec BS.ByteString () a
-- | Shape of a contest driver: parser + solver + printer.
type CMain err a b = Parser err a -> Solver a b -> Printer b -> IO ()
-- | 'CMain' with explicit input/output handles.
type HCMain err a b = Handle -> Handle -> CMain err a b
-- | Run a contest solution over stdin/stdout.
contestMain :: Show err => CMain err a b
contestMain = hContestMain stdin stdout
-- | Parse from @hin@, solve, and print to @hout@; a parse failure is
-- reported on stderr instead of running the solver.
hContestMain :: Show err => HCMain err a b
hContestMain hin hout parser solver printer = do
    input <- parser hin
    case input of
        Left err -> do { hPutStr stderr "parse error: "; hPrint stderr err }
        Right x -> printer hout $ solver x
-- | Run 'hContestMain' @n@ times over the same pair of handles (for
-- inputs that repeat the same block structure @n@ times).
--
-- FIX: the parameters were previously named @printer solver parser@
-- even though, per 'HCMain'/'CMain', the third argument is the parser
-- and the fifth the printer.  Behaviour was unaffected (they were
-- forwarded positionally) but the names were misleading.
hContestMainN :: Show err => Int -> HCMain err a b
hContestMainN n hin hout parser solver printer =
    replicateM_ n (hContestMain hin hout parser solver printer)
-- | Lift a reader that cannot fail into the 'Parser' interface by
-- always wrapping its result in 'Right'.
parserWithoutError :: (Handle -> IO a) -> Parser ParseError a
parserWithoutError readInput handle = fmap Right (readInput handle)
-- | Build a 'Parser' from a parsec parser by running it over the
-- handle's entire (lazy) contents.
parsec :: ParsecParser a -> Parser ParseError a
parsec p hin = runParser p () "" <$> BS.hGetContents hin
-- | Google-Code-Jam-style input: a leading test-case count @t@
-- followed by @t@ repetitions of the per-case parser.
-- NOTE(review): 'number', 'spaces' and 'count' are not imported here
-- explicitly — presumably re-exported by "Milib.IO"; confirm.
gcjParsec :: ParsecParser a -> Parser ParseError [a]
gcjParsec p = parsec (p' <?> "gcjParsec")
    where
        p' = do
            t <- number
            spaces
            count t p
-- | Wrap a per-case printer to emit GCJ-style answers on one line:
-- @Case #1: <answer>@ (the per-case printer supplies the value and
-- any trailing newline).
--
-- BUG FIX: the format string was @"Case #$d: "@; printf conversions
-- are introduced by \'%\', so the case number was never substituted.
-- Corrected to @"Case #%d: "@, matching 'gcjPrinterLn'.
gcjPrinter :: Printer b -> Printer [b]
gcjPrinter p h xs = mapM_ f $ zip xs ([1..] :: [Int])
    where
        f (x, i) = do
            hPrintf h "Case #%d: " i
            p h x
-- | Like 'gcjPrinter', but the case header goes on its own line
-- (@Case #1:@ followed by the per-case output).
gcjPrinterLn :: Printer b -> Printer [b]
gcjPrinterLn p h xs = forM_ (zip xs ([1..] :: [Int])) emit
    where
        emit (answer, caseNo) = do
            hPrintf h "Case #%d:\n" caseNo
            p h answer
-- vim: set expandtab:
| mkut/milib_haskell | src/Milib/Contest.hs | bsd-3-clause | 2,241 | 0 | 12 | 527 | 801 | 421 | 380 | 60 | 2 |
{-# LANGUAGE ScopedTypeVariables #-}
module Problem11
( maxProductInMatrix
, maxProductInList
, diagonalsLR
, diagonalsRL
, cutFirstColumn
, upperRightHalf
) where
import Data.List as L
import Lib (slices)
-- | A slice's product paired with the slice that produced it.
type SliceProduct = (Int, [Int])
-- further relying on `Ord a => (a,a)` being an instance
-- of `Ord` as well (compared by first element first)
-- | Number of adjacent factors per slice (Project Euler problem 11).
n = 4
-- | The 20x20 grid from Project Euler problem 11, row-major.
matrix = [
  [08, 02, 22, 97, 38, 15, 00, 40, 00, 75, 04, 05, 07, 78, 52, 12, 50, 77, 91, 08],
  [49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 04, 56, 62, 00],
  [81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 03, 49, 13, 36, 65],
  [52, 70, 95, 23, 04, 60, 11, 42, 69, 24, 68, 56, 01, 32, 56, 71, 37, 02, 36, 91],
  [22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],
  [24, 47, 32, 60, 99, 03, 45, 02, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],
  [32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],
  [67, 26, 20, 68, 02, 62, 12, 20, 95, 63, 94, 39, 63, 08, 40, 91, 66, 49, 94, 21],
  [24, 55, 58, 05, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],
  [21, 36, 23, 09, 75, 00, 76, 44, 20, 45, 35, 14, 00, 61, 33, 97, 34, 31, 33, 95],
  [78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 03, 80, 04, 62, 16, 14, 09, 53, 56, 92],
  [16, 39, 05, 42, 96, 35, 31, 47, 55, 58, 88, 24, 00, 17, 54, 24, 36, 29, 85, 57],
  [86, 56, 00, 48, 35, 71, 89, 07, 05, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],
  [19, 80, 81, 68, 05, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 04, 89, 55, 40],
  [04, 52, 08, 83, 97, 35, 99, 16, 07, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],
  [88, 36, 68, 87, 57, 62, 20, 72, 03, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],
  [04, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 08, 46, 29, 32, 40, 62, 76, 36],
  [20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 04, 36, 16],
  [20, 73, 35, 29, 78, 31, 90, 01, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 05, 54],
  [01, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 01, 89, 19, 67, 48]]
-- | The greatest product of 'n' adjacent numbers in 'matrix' along
-- any row, column, or diagonal, together with the winning slice.
maxProductInMatrix :: (Int, [Int])
maxProductInMatrix = L.maximum (map maxProductInList series)
  where
    -- every line to scan: rows, columns, then both diagonal families
    series = concat
      [ matrix
      , L.transpose matrix
      , diagonalsLR matrix
      , diagonalsRL matrix
      ]
-- | Top-left-to-bottom-right diagonals of length at least 'n'.  For
-- each row suffix, the columns of its upper-right half (after
-- 'L.transpose') are the diagonals starting in that suffix's top row;
-- diagonals too short to hold a slice of length 'n' are dropped.
-- (Diagonals starting in later rows reappear as suffixes of earlier
-- ones, so some runs are scanned twice — harmless for a maximum.)
diagonalsLR :: [[Int]] -> [[Int]]
diagonalsLR [] = []
diagonalsLR (xs:xss) = (diagsFromUpperHalf (xs:xss)) ++ diagonalsLR xss
  where diagsFromUpperHalf :: [[Int]] -> [[Int]]
        diagsFromUpperHalf xss = let upperHalf = upperRightHalf xss
                                     allDiags = L.transpose upperHalf
                                 in filter ((n<=).length) allDiags
-- | Top-right-to-bottom-left diagonals: mirror every row and reuse
-- 'diagonalsLR' on the mirrored matrix.
diagonalsRL :: [[Int]] -> [[Int]]
diagonalsRL = diagonalsLR . map L.reverse
-- | Cut the matrix along the main diagonal from (0,0), keeping each
-- row's part on or to the right of the diagonal:
--   1 2 3      1 2 3
--   4 5 6  =>    5 6
--   7 8 9          9
-- NOTE: partial — the empty matrix has no matching clause.
upperRightHalf :: [[Int]] -> [[Int]]
upperRightHalf [row] = [row]
upperRightHalf (row:rows) = row : upperRightHalf (cutFirstColumn rows)

-- | Drop the leading entry of every row:
--   1 2 3      2 3
--   4 5 6  =>  5 6
--   7 8 9      8 9
cutFirstColumn :: [[Int]] -> [[Int]]
cutFirstColumn = map tail
-- | The best (product, slice) among all length-'n' slices of a list
-- (slicing is delegated to 'Lib.slices'; ties break on the slice).
maxProductInList :: [Int] -> SliceProduct
maxProductInList xs = L.maximum [ (product s, s) | s <- slices xs n ]
| candidtim/euler | src/Problem11.hs | bsd-3-clause | 3,483 | 0 | 14 | 915 | 1,773 | 1,128 | 645 | 57 | 1 |
{-# LANGUAGE OverloadedStrings, FlexibleContexts, PackageImports #-}
module Network.Sasl.External.Client (sasl) where
import "monads-tf" Control.Monad.State
import "monads-tf" Control.Monad.Error
import Data.Pipe
import qualified Data.ByteString as BS
import Network.Sasl
-- | The client side of the SASL EXTERNAL mechanism: the mechanism
-- name paired with the (client-goes-first flag, message pipe) pair
-- produced by 'client' from "Network.Sasl".
sasl :: (
	MonadState m, SaslState (StateType m),
	MonadError m, Error (ErrorType m) ) => (
	BS.ByteString,
	(Bool, Pipe (Either Success BS.ByteString) BS.ByteString m ()) )
sasl = ("EXTERNAL", client script)
-- | A one-step exchange: send 'clientMessage' first, expect no
-- further challenges and no final verification step.
script :: (
	MonadState m, SaslState (StateType m),
	MonadError m, Error (ErrorType m) ) => Client m
script = Client (Just clientMessage) [] Nothing
-- | The single EXTERNAL message: the authorization identity, read
-- from the "username" entry of the SASL state.
-- NOTE(review): @let Just username = ...@ is a partial pattern; a
-- missing "username" key fails with a pattern-match error rather
-- than a 'MonadError' failure — confirm callers always seed it.
clientMessage :: (
	MonadState m, SaslState (StateType m),
	MonadError m, Error (ErrorType m) ) => Send m
clientMessage = do
	st <- gets getSaslState
	let Just username = lookup "username" st
	return username
| YoshikuniJujo/sasl | src/Network/Sasl/External/Client.hs | bsd-3-clause | 846 | 28 | 11 | 131 | 307 | 168 | 139 | 24 | 1 |
{-# LANGUAGE ScopedTypeVariables #-}
module Yesod.Helpers.Logger where
import ClassyPrelude
import Data.Default (Default(..))
import qualified Data.Text as T
import Control.DeepSeq (force)
import Network.Wai.Logger (DateCacheGetter)
import Yesod.Core.Types
import System.FilePath (splitFileName, takeFileName)
import System.Directory (renameFile)
import qualified Text.Parsec.Number as PN
import Text.Parsec (parse, eof)
import qualified Text.Parsec
import Data.Conduit
import Data.Conduit.Combinators (sourceDirectory)
import System.Posix.Files (getFileStatus, fileSize)
import System.Posix.Types (COff(..))
import Control.Monad.Trans.Resource (runResourceT)
import Control.Monad.Logger
import System.Log.FastLogger
import Data.Aeson
import qualified Data.Aeson.Types as AT
import Yesod.Helpers.Aeson ( parseTextByRead, nullValueToNothing, parseSomeObjects
, parseTextByParsec
)
import Yesod.Helpers.Parsec ( parseByteSizeWithUnit )
-- | The logging callback shape used by monad-logger / Yesod.
type LoggingFunc = Loc -> LogSource -> LogLevel -> LogStr -> IO ()
-- | Things that can supply a logging function for running 'LoggingT'.
class LoggingTRunner a where
    runLoggingTWith :: a -> LoggingT m r -> m r
-- | Use a map to implement 'shouldLog' of Yesod class.
-- The logic is the following:
-- 1) Lookup the LogLevel according to the requested LogSource in the map,
-- If found, compare the requested LogLevel to the found one.
-- If not found, go to step 2.
-- 2) repeat step 1, but using a special LogSource "_".
-- If this failed too, the whole process failed.
-- see: shouldLogByLogSourceLevelMap
-- | Per-source minimum log levels; a 'Nothing' level mutes the
-- source entirely (see 'shouldLogByLogSourceLevelMap').
type LogSourceLevelMap = Map LogSource (Maybe LogLevel)
-- | Use this function to implement 'shouldLog' or 'shouldLogIO'
shouldLogByLogSourceLevelMap :: LogSourceLevelMap -> LogSource -> LogLevel -> Maybe Bool
shouldLogByLogSourceLevelMap the_map source level = do
fmap (fromMaybe False . fmap (level >=)) $
lookup source the_map <|> lookup "_" the_map
-- | A helper for reading LogSourceLevelMap from YAML.
-- Example YAML section:
-- min-log-level:
--      SQL         : LevelWarn
--      _           : LevelDebug
-- A JSON null value maps a source to 'Nothing' (muted).
parseLogSourceLevelMap :: Value -> AT.Parser LogSourceLevelMap
parseLogSourceLevelMap val = do
    -- partial type signature: key/value types fixed by the uses below
    the_map :: HashMap _ _ <- parseJSON val
    fmap mapFromList $ forM (mapToList the_map) $ \(k, v) -> do
        let new_k = fromString k
        new_v <- nullValueToNothing v >>= traverse (parseTextByRead "LogLevel")
        return (new_k, new_v)
-- | Parse a log level name, accepting both the short spellings
-- ("debug", "warn", "warning", ...) and the constructor names
-- ("LevelDebug", ...).  Anything unrecognised becomes 'LevelOther'.
logLevelFromText :: Text -> LogLevel
logLevelFromText t = case t of
    "debug"      -> LevelDebug
    "LevelDebug" -> LevelDebug
    "info"       -> LevelInfo
    "LevelInfo"  -> LevelInfo
    "warning"    -> LevelWarn
    "warn"       -> LevelWarn
    "LevelWarn"  -> LevelWarn
    "error"      -> LevelError
    "LevelError" -> LevelError
    other        -> LevelOther other
-- | Where to write logs — a file, stdout, or stderr — each carrying
-- its fast-logger buffer size.
data LogDest = LogDestFile BufSize FilePath
                | LogDestStdout BufSize
                | LogDestStderr BufSize
                deriving (Eq, Ord, Show)
-- | Allocate the fast-logger 'LoggerSet' described by a 'LogDest'.
newLoggerSetByDest :: LogDest -> IO LoggerSet
newLoggerSetByDest dest = case dest of
    LogDestStdout buf -> newStdoutLoggerSet buf
    LogDestStderr buf -> newStderrLoggerSet buf
    LogDestFile buf path -> newFileLoggerSet buf path
-- | An IO action that performs log archiving.
type LogArchiveAction = IO ()
-- | Decide whether a message at (source, level) should be logged.
type ShouldLogPred = LogSource -> LogLevel -> IO Bool
-- | Sinks that accept rendered 'LogStr' chunks.
class LogStore a where
    lxPushLogStr :: a -> LogStr -> IO ()
    lxGetLoggerSet :: a -> LoggerSet
-- | Existential wrapper so heterogeneous stores can share a list.
data SomeLogStore = forall h. LogStore h => SomeLogStore h
-- | Things that implement a 'ShouldLogPred'.
class ShouldLogPredicator a where
    lxShouldLog :: a -> ShouldLogPred
-- | Existential wrapper for any predicate implementation.
data SomeShouldLogPredicator = forall a. ShouldLogPredicator a => SomeShouldLogPredicator a
-- | Compare a message's level against a configured minimum.  Two
-- 'LevelOther' values match only when their labels are equal; all
-- other combinations use the derived ordering of 'LogLevel'.
shouldLogByLevel :: LogLevel -- ^ minimum level
                 -> LogLevel -- ^ level to be tested
                 -> Bool
shouldLogByLevel (LevelOther wanted) (LevelOther got) = wanted == got
shouldLogByLevel minLevel level = minLevel <= level
-- | Look up the message's source (falling back to the wildcard "*")
-- and compare levels via 'shouldLogByLevel'; sources absent from the
-- map (and with no wildcard) are not logged.
-- (Name kept as-is, typo included, since callers depend on it.)
shoudLogBySrcLevelMap ::
    HashMap LogSource LogLevel
    -> LogSource
    -> LogLevel
    -> Bool
shoudLogBySrcLevelMap hm src level =
    maybe False (`shouldLogByLevel` level) (lookup src hm <|> lookup "*" hm)
-- | A plain source-to-level map is itself a predicate
-- (via 'shoudLogBySrcLevelMap').
instance ShouldLogPredicator (HashMap LogSource LogLevel) where
    lxShouldLog hm src level = return $ shoudLogBySrcLevelMap hm src level
-- | A bare fast-logger 'LoggerSet' is a store.
instance LogStore LoggerSet where
    lxPushLogStr = pushLogStr
    lxGetLoggerSet = id
-- | A file-backed store that rotates the file (see
-- 'renewLogFileAtMaxSize') once the estimated size reaches the limit.
data LogFileAtMaxSize = LogFileAtMaxSize
                            Int64           -- max size
                            FilePath        -- log file path
                            (IORef Int64)   -- dest. file size, increase when push log
                            LoggerSet
instance LogStore LogFileAtMaxSize where
    lxPushLogStr lf@(LogFileAtMaxSize max_sz _fp size_cnt ls) log_str = do
        lxPushLogStr ls log_str
        -- track the size ourselves: atomically add the chunk length
        -- ('force' keeps thunks from piling up inside the IORef)
        new_sz <- atomicModifyIORef' size_cnt $
                    \x -> let y = x + fromIntegral (logStrLength log_str) in force (y, y)
        when (new_sz >= max_sz) $ do
            renewLogFileAtMaxSize lf
    lxGetLoggerSet (LogFileAtMaxSize _max_sz _fp _size_cnt ls) = ls
-- | Flush, then consult the real on-disk size: if it exceeds the
-- limit, rotate the file away ('cutLogFileThenArchive'), zero the
-- counter and reopen the 'LoggerSet'; otherwise just resynchronise
-- the in-memory counter with the actual size.  IOErrors are rethrown
-- annotated with this location and the file path.
renewLogFileAtMaxSize :: LogFileAtMaxSize -> IO ()
renewLogFileAtMaxSize (LogFileAtMaxSize max_sz fp size_cnt ls) = do
    let renew = do
            flushLogStr ls
            COff fsize <- fileSize <$> getFileStatus fp
            if ( fsize > max_sz )
                then do
                    cutLogFileThenArchive fp
                    writeIORef size_cnt 0
                    renewLoggerSet ls
                else
                    writeIORef size_cnt fsize
    renew `catchIOError` annotateRethrowIOError "renewLogFileAtMaxSize" Nothing (Just fp)
-- | create LogFileAtMaxSize: open the file, seed the size counter
-- from its current on-disk size, and rotate immediately if it is
-- already over the limit.
newLogFileAtMaxSize :: Int64 -> BufSize -> FilePath -> IO LogFileAtMaxSize
newLogFileAtMaxSize max_size buf_size fp =
    handle (annotateRethrowIOError "newLogFileAtMaxSize" Nothing (Just fp)) $ do
        logger_set <- newFileLoggerSet buf_size fp
        COff fsize <- fileSize <$> getFileStatus fp
        est_file_sz <- newIORef fsize
        let lf = LogFileAtMaxSize max_size fp est_file_sz logger_set
        when (fsize >= max_size) $ do
            renewLogFileAtMaxSize lf
        return lf
-- | Parse one log-store spec object into an IO action that allocates
-- the store.  "type" selects file/stdout/stderr; "buf-size" (and, for
-- files, "cut-at-size") are byte sizes with optional units.  @m_bp@
-- is an optional base directory prepended to relative file paths.
parseSomeLogStoreObj :: Maybe FilePath -> Object -> AT.Parser (IO SomeLogStore)
parseSomeLogStoreObj m_bp o = do
    typ <- o .: "type"
    buf_size <- (o .:? "buf-size"
                    >>= traverse (parseTextByParsec parseByteSizeWithUnit)
                ) .!= defaultBufSize
    case typ of
        "file" -> do
            fp <- fmap (maybe id (</>) m_bp) $ o .: "path"
            m_sz <- o .:? "cut-at-size" >>= traverse (parseTextByParsec parseByteSizeWithUnit)
            case m_sz of
                -- no size limit: a plain file logger set
                Nothing -> do
                    return $ do
                        liftM SomeLogStore $
                            newFileLoggerSet buf_size fp
                                `catchIOError` annotateRethrowIOError "newFileLoggerSet" Nothing (Just fp)
                -- size limit present: the self-rotating store
                Just sz -> do
                    return $ do
                        liftM SomeLogStore $ newLogFileAtMaxSize sz buf_size fp
        "stdout" -> return $ do
                        liftM SomeLogStore $ newStdoutLoggerSet buf_size
        "stderr" -> return $ do
                        liftM SomeLogStore $ newStderrLoggerSet buf_size
        _ -> fail $ "unknown handler type: " ++ typ
-- | Parse the "src-level" object into a predicate: keys are log
-- sources (whitespace-trimmed; "*" acts as a wildcard at lookup
-- time), values are level names understood by 'logLevelFromText'.
parseSomeShouldLogPredObj :: Object -> AT.Parser (IO SomeShouldLogPredicator)
parseSomeShouldLogPredObj obj = do
    src_level_map :: HashMap _ _ <- obj .: "src-level" >>= parse_map
    return $ return $ SomeShouldLogPredicator src_level_map
    where
        parse_map = withObject "source-to-level-map" $ \o -> do
                liftM mapFromList $
                    forM (mapToList o) $ \(k, v) -> do
                        lv <- withText "LogLevel" (return . logLevelFromText) v
                        return (T.strip k, lv)
-- | use this in AppSettings of scaffold site.  Each entry is a pair
-- of IO allocators (store, predicate), still to be run.
data LoggerConfig = LoggerConfig
                        (Vector (IO SomeLogStore, IO SomeShouldLogPredicator))
                        (Maybe (IO SomeLogStore, IO SomeShouldLogPredicator))
                            -- ^ default value to use when no others handles the logs
-- | By default: no stores and no fallback (nothing gets logged).
instance Default LoggerConfig where
    def = LoggerConfig mempty Nothing
-- | Expected object shape: optional "base-path" (for relative log
-- files), "others" (list of store/predicate specs) and optional
-- "default" (the fallback spec).
instance FromJSON LoggerConfig where
    parseJSON = withObject "LoggerConfig" $ \obj -> do
        m_base_path <- obj .:? "base-path"
        LoggerConfig
            <$> (fmap fromList $ obj .: "others" >>= parseSomeObjects "LoggerConfig others" (parse_obj m_base_path))
            <*> (obj .:? "default" >>= traverse (parse_obj m_base_path))
        where
            -- one spec object yields both the store and its predicate
            parse_obj m_bp = \o -> do
                            (,) <$> parseSomeLogStoreObj m_bp o
                                <*> parseSomeShouldLogPredObj o
-- | real value for handling all logs: the timestamp source, the
-- materialised per-source stores, and the optional fallback pair.
data LogHandlerV = LogHandlerV
                    DateCacheGetter
                    (Vector (SomeLogStore, SomeShouldLogPredicator))
                    (Maybe (SomeLogStore, SomeShouldLogPredicator))
-- | Materialise a 'LoggerConfig' (whose entries are IO allocators)
-- into a ready-to-use 'LogHandlerV'.
newLogHandlerV :: DateCacheGetter -> LoggerConfig -> IO LogHandlerV
newLogHandlerV getdate (LoggerConfig entries m_fallback) =
    LogHandlerV getdate
        <$> traverse runPair entries
        <*> traverse runPair m_fallback
  where
    -- run both allocators of one (store, predicate) pair, store first
    runPair (mkStore, mkPred) = (,) <$> mkStore <*> mkPred
-- | use this with runLoggingT.  Offers the message to every
-- configured store whose predicate accepts it; the fallback store is
-- consulted only when none of them did.  The message is formatted at
-- most once, and only if some store actually wants it.
logFuncByHandlerV ::
    LogHandlerV
    -> LoggingFunc
logFuncByHandlerV (LogHandlerV getdate v m_def) loc src level msg = do
    -- memoise the formatted message in an IORef
    log_str_ioref <- newIORef Nothing
    let get_log_str = do
            m_log_str <- readIORef log_str_ioref
            case m_log_str of
                Nothing -> do
                    log_str <- formatLogMessage getdate loc src level msg
                    writeIORef log_str_ioref (Just log_str)
                    return log_str
                Just x -> return x
    -- Just () marks "this store took the message"
    not_done <- liftM (null . filter isJust) $ forM v $
                    \(SomeLogStore store, SomeShouldLogPredicator p) -> do
                        should_log <- lxShouldLog p src level
                        if should_log
                            then get_log_str >>= lxPushLogStr store >> return (Just ())
                            else return Nothing
    -- nobody logged it: give the fallback pair a chance
    when not_done $ do
        void $ forM m_def $ \(SomeLogStore store, SomeShouldLogPredicator p) -> do
            should_log <- lxShouldLog p src level
            if should_log
                then get_log_str >>= lxPushLogStr store >> return (Just ())
                else return Nothing
-- | A logging function that consults only the fallback store and
-- predicate, ignoring the per-source stores entirely.
logFuncFallbackByHandlerV ::
    LogHandlerV
    -> LoggingFunc
logFuncFallbackByHandlerV (LogHandlerV getdate _v m_def) loc src level msg = do
    void $ forM m_def $ \(SomeLogStore store, SomeShouldLogPredicator p) -> do
        should_log <- lxShouldLog p src level
        when ( should_log ) $ do
            formatLogMessage getdate loc src level msg
                >>= lxPushLogStr store
-- | in scaffold site, makeApplication function:
-- we need a LoggerSet to make log middleware.
-- Hence we need to choose a LoggerSet in the available.
-- Strategy: try each level from Debug upward and pick the first store
-- whose predicate would log this source at that level.
chooseYesodLoggerBySrcLV :: LogHandlerV -> LogSource -> IO (Maybe Logger)
chooseYesodLoggerBySrcLV (LogHandlerV getdate v _def_logger_set) src = do
    stores <-
        forM ([LevelDebug, LevelInfo, LevelWarn, LevelError, LevelOther ""]) $ \level -> do
            liftM catMaybes $ forM (toList v) $
                \(store, SomeShouldLogPredicator p) -> do
                    should_log <- lxShouldLog p src level
                    return $ if should_log
                                then Just store
                                else Nothing
    -- first hit across all levels (Debug-level matches win)
    forM (listToMaybe $ join stores) $ \(SomeLogStore store) -> do
        return $ Logger (lxGetLoggerSet store) getdate
-- | The Yesod 'Logger' backed by the fallback store, if one is
-- configured.
defaultYesodLoggerHandlerV :: LogHandlerV -> Maybe Logger
defaultYesodLoggerHandlerV (LogHandlerV getdate _v m_def) =
    case m_def of
        Nothing -> Nothing
        Just (SomeLogStore store, _pred) ->
            Just (Logger (lxGetLoggerSet store) getdate)
-- | A 'shouldLog' implementation driven by the fallback predicate;
-- logs nothing when no fallback is configured.
defaultShouldLogLV :: LogHandlerV -> LogSource -> LogLevel -> IO Bool
defaultShouldLogLV (LogHandlerV _getdate _v m_def) src level =
    case m_def of
        Nothing -> return False
        Just (_store, SomeShouldLogPredicator p) -> lxShouldLog p src level
-- | Route all 'LoggingT' output through 'logFuncByHandlerV'.
instance LoggingTRunner LogHandlerV where
    runLoggingTWith v = flip runLoggingT (logFuncByHandlerV v)
-- | Run a handler with its per-request logging function replaced
-- (rewrites 'rheLog' in the handler environment).
withLogFuncInHandlerT ::
    LoggingFunc
    -> HandlerT site m a
    -> HandlerT site m a
withLogFuncInHandlerT log_func (HandlerT f) = HandlerT $ \hd -> do
    let rhe = handlerEnv hd
    let rhe' = rhe { rheLog = log_func }
        hd' = hd { handlerEnv = rhe' }
    f hd'
-- | usually, in 'HandlerT site m', log will go to the logger returned by
-- 'makeLogger'. With this function, log will be handled by runLoggingTWith
-- (i.e. the foundation's own 'LoggingTRunner' instance).
withSiteLogFuncInHandlerT :: (LoggingTRunner site, Monad m) =>
                            HandlerT site m a
                            -> HandlerT site m a
withSiteLogFuncInHandlerT h = do
    foundation <- ask
    runLoggingTWith foundation $ LoggingT $ \ log_func ->
        withLogFuncInHandlerT log_func h
-- | Rotate a log file out of the way: scan its directory for existing
-- "<name>.<k>" archives, find the largest numeric suffix, and rename
-- "<name>" to "<name>.<k+1>".
cutLogFileThenArchive :: FilePath -> IO ()
cutLogFileThenArchive log_path = do
    suf_n <- runResourceT $ sourceDirectory dir_name $$ find_next_n (0 :: Int)
    let suf = '.' : show (suf_n + 1 :: Int)
    renameFile log_path (log_path ++ suf)
    where
        (dir_name, log_file_name) = splitFileName log_path
        -- conduit sink: fold directory entries, keeping the largest
        -- numeric suffix seen so far
        find_next_n last_n = do
            mx <- await
            case mx of
                Nothing -> return last_n
                Just fp -> do
                    case stripPrefix (log_file_name ++ ".") (takeFileName fp) of
                        Nothing -> find_next_n last_n
                        Just suf -> do
                            case parse parse_suffix "" suf of
                                Left _ -> find_next_n last_n
                                Right x -> find_next_n $ max x last_n
        -- a natural number, terminated by end-of-input or a dot
        parse_suffix = do
            x <- PN.nat
            _ <- eof <|> (Text.Parsec.char '.' >> return ())
            return x
-- | Rethrow an 'IOError' after annotating it with a location string
-- and the handle/path it concerns, for more useful error reports.
annotateRethrowIOError :: String
                       -> Maybe Handle
                       -> Maybe FilePath
                       -> IOError
                       -> IO a
annotateRethrowIOError location mHandle mPath ioErr =
    throwIO (annotateIOError ioErr location mHandle mPath)
-- | XXX: copied from source of yesod-core
-- Renders: "<timestamp> [<Level>#<source>] <msg> @(pkg:Mod file:l:c)\n"
-- (the "#<source>" part is omitted for an empty source).
formatLogMessage :: DateCacheGetter
                -> Loc
                -> LogSource
                -> LogLevel
                -> LogStr -- ^ message
                -> IO LogStr
formatLogMessage getdate loc src level msg = do
    now <- getdate
    return $
        toLogStr now `mappend`
        " [" `mappend`
        (case level of
            LevelOther t -> toLogStr t
            -- drop the "Level" prefix of the constructor name
            _ -> toLogStr $ drop 5 $ show level) `mappend`
        (if null src
            then mempty
            else "#" `mappend` toLogStr src) `mappend`
        "] " `mappend`
        msg `mappend`
        " @(" `mappend`
        toLogStr (fileLocationToString loc) `mappend`
        ")\n"
-- taken from file-location package
-- | Render a Template Haskell 'Loc' as
-- "package:Module filename:line:column", using only the start
-- position (loc_end is ignored).
fileLocationToString :: Loc -> String
fileLocationToString loc =
    concat [ loc_package loc, ":", loc_module loc, " "
           , loc_filename loc, ":", show row, ":", show col ]
  where
    (row, col) = loc_start loc
| yoo-e/yesod-helpers | Yesod/Helpers/Logger.hs | bsd-3-clause | 15,756 | 0 | 23 | 4,835 | 3,853 | 1,924 | 1,929 | -1 | -1 |
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
The @match@ function
-}
{-# LANGUAGE CPP #-}
module Match ( match, matchEquations, matchWrapper, matchSimply, matchSinglePat ) where
#include "HsVersions.h"
import {-#SOURCE#-} DsExpr (dsLExpr, dsExpr)
import DynFlags
import HsSyn
import TcHsSyn
import TcEvidence
import TcRnMonad
import Check
import CoreSyn
import Literal
import CoreUtils
import MkCore
import DsMonad
import DsBinds
import DsGRHSs
import DsUtils
import Id
import ConLike
import DataCon
import PatSyn
import MatchCon
import MatchLit
import Type
import Coercion ( eqCoercion )
import TcType ( toTcTypeBag )
import TyCon( isNewTyCon )
import TysWiredIn
import ListSetOps
import SrcLoc
import Maybes
import Util
import Name
import Outputable
import BasicTypes ( isGenerated )
import Control.Monad( when, unless )
import qualified Data.Map as Map
{-
************************************************************************
* *
The main matching function
* *
************************************************************************
The function @match@ is basically the same as in the Wadler chapter,
except it is monadised, to carry around the name supply, info about
annotations, etc.
Notes on @match@'s arguments, assuming $m$ equations and $n$ patterns:
\begin{enumerate}
\item
A list of $n$ variable names, those variables presumably bound to the
$n$ expressions being matched against the $n$ patterns. Using the
list of $n$ expressions as the first argument showed no benefit and
some inelegance.
\item
The second argument, a list giving the ``equation info'' for each of
the $m$ equations:
\begin{itemize}
\item
the $n$ patterns for that equation, and
\item
a list of Core bindings [@(Id, CoreExpr)@ pairs] to be ``stuck on
the front'' of the matching code, as in:
\begin{verbatim}
let <binds>
in <matching-code>
\end{verbatim}
\item
and finally: (ToDo: fill in)
The right way to think about the ``after-match function'' is that it
is an embryonic @CoreExpr@ with a ``hole'' at the end for the
final ``else expression''.
\end{itemize}
There is a type synonym, @EquationInfo@, defined in module @DsUtils@.
An experiment with re-ordering this information about equations (in
particular, having the patterns available in column-major order)
showed no benefit.
\item
A default expression---what to evaluate if the overall pattern-match
fails. This expression will (almost?) always be
a measly expression @Var@, unless we know it will only be used once
(as we do in @glue_success_exprs@).
Leaving out this third argument to @match@ (and slamming in lots of
@Var "fail"@s) is a positively {\em bad} idea, because it makes it
impossible to share the default expressions. (Also, it stands no
chance of working in our post-upheaval world of @Locals@.)
\end{enumerate}
Note: @match@ is often called via @matchWrapper@ (end of this module),
a function that does much of the house-keeping that goes with a call
to @match@.
It is also worth mentioning the {\em typical} way a block of equations
is desugared with @match@. At each stage, it is the first column of
patterns that is examined. The steps carried out are roughly:
\begin{enumerate}
\item
Tidy the patterns in column~1 with @tidyEqnInfo@ (this may add
bindings to the second component of the equation-info):
\begin{itemize}
\item
Remove the `as' patterns from column~1.
\item
Make all constructor patterns in column~1 into @ConPats@, notably
@ListPats@ and @TuplePats@.
\item
Handle any irrefutable (or ``twiddle'') @LazyPats@.
\end{itemize}
\item
Now {\em unmix} the equations into {\em blocks} [w\/ local function
@unmix_eqns@], in which the equations in a block all have variable
patterns in column~1, or they all have constructor patterns in ...
(see ``the mixture rule'' in SLPJ).
\item
Call @matchEqnBlock@ on each block of equations; it will do the
appropriate thing for each kind of column-1 pattern, usually ending up
in a recursive call to @match@.
\end{enumerate}
We are a little more paranoid about the ``empty rule'' (SLPJ, p.~87)
than the Wadler-chapter code for @match@ (p.~93, first @match@ clause).
And gluing the ``success expressions'' together isn't quite so pretty.
This (more interesting) clause of @match@ uses @tidy_and_unmix_eqns@
(a)~to get `as'- and `twiddle'-patterns out of the way (tidying), and
(b)~to do ``the mixture rule'' (SLPJ, p.~88) [which really {\em
un}mixes the equations], producing a list of equation-info
blocks, each block having as its first column of patterns either all
constructors, or all variables (or similar beasts), etc.
@match_unmixed_eqn_blks@ simply takes the place of the @foldr@ in the
Wadler-chapter @match@ (p.~93, last clause), and @match_unmixed_blk@
corresponds roughly to @matchVarCon@.
-}
-- | The all-important 'match' function: desugar equations one column of
-- patterns at a time (see the long commentary above).
match :: [Id]             -- Variables rep'ing the exprs we're matching with
      -> Type             -- Type of the case expression
      -> [EquationInfo]   -- Info about patterns, etc. (type synonym below)
      -> DsM MatchResult  -- Desugared result!

-- No variables left: every equation must have an empty pattern list,
-- so just glue all the right-hand sides together.
match [] ty eqns
  = ASSERT2( not (null eqns), ppr ty )
    return (foldr1 combineMatchResults match_results)
  where
    match_results = [ ASSERT( null (eqn_pats eqn) )
                      eqn_rhs eqn
                    | eqn <- eqns ]

match vars@(v:_) ty eqns    -- Eqns *can* be empty
  = do { dflags <- getDynFlags
         -- Tidy the first pattern, generating
         -- auxiliary bindings if necessary
       ; (aux_binds, tidy_eqns) <- mapAndUnzipM (tidyEqnInfo v) eqns
         -- Group the equations and match each group in turn
       ; let grouped = groupEquations dflags tidy_eqns
         -- print the view patterns that are commoned up to help debug
       ; whenDOptM Opt_D_dump_view_pattern_commoning (debug grouped)
       ; match_results <- match_groups grouped
       ; return (adjustMatchResult (foldr (.) id aux_binds) $
                 foldr1 combineMatchResults match_results) }
  where
    dropGroup :: [(PatGroup,EquationInfo)] -> [EquationInfo]
    dropGroup = map snd

    match_groups :: [[(PatGroup,EquationInfo)]] -> DsM [MatchResult]
    -- Result list of [MatchResult] is always non-empty
    match_groups [] = matchEmpty v ty
    match_groups gs = mapM match_group gs

    match_group :: [(PatGroup,EquationInfo)] -> DsM MatchResult
    match_group [] = panic "match_group"
    -- Dispatch on the kind of the first pattern of the group; every
    -- equation in the group has the same PatGroup (see groupEquations)
    match_group eqns@((group,_) : _)
      = case group of
          PgCon {}  -> matchConFamily  vars ty (subGroup [(c,e) | (PgCon c, e) <- eqns])
          PgSyn {}  -> matchPatSyn     vars ty (dropGroup eqns)
          PgLit {}  -> matchLiterals   vars ty (subGroup [(l,e) | (PgLit l, e) <- eqns])
          PgAny     -> matchVariables  vars ty (dropGroup eqns)
          PgN {}    -> matchNPats      vars ty (dropGroup eqns)
          PgNpK {}  -> matchNPlusKPats vars ty (dropGroup eqns)
          PgBang    -> matchBangs      vars ty (dropGroup eqns)
          PgCo {}   -> matchCoercion   vars ty (dropGroup eqns)
          PgView {} -> matchView       vars ty (dropGroup eqns)
          PgOverloadedList -> matchOverloadedList vars ty (dropGroup eqns)

    -- FIXME: we should also warn about view patterns that should be
    -- commoned up but are not

    -- print some stuff to see what's getting grouped
    -- use -dppr-debug to see the resolution of overloaded literals
    debug eqns =
        let gs = map (\group -> foldr (\ (p,_) -> \acc ->
                                        case p of PgView e _ -> e:acc
                                                  _ -> acc) [] group) eqns
            maybeWarn [] = return ()
            maybeWarn l = warnDs (vcat l)
        in
          maybeWarn $ (map (\g -> text "Putting these view expressions into the same case:" <+> (ppr g))
                       (filter (not . null) gs))
matchEmpty :: Id -> Type -> DsM [MatchResult]
-- See Note [Empty case alternatives]
-- Desugar an empty group of equations: produce a case with a single
-- DEFAULT alternative whose RHS is the overall failure expression.
matchEmpty var res_ty
  = return [MatchResult CanFail mk_seq]
  where
    mk_seq fail = return $ mkWildCase (Var var) (idType var) res_ty
                                      [(DEFAULT, [], fail)]
matchVariables :: [Id] -> Type -> [EquationInfo] -> DsM MatchResult
-- Real true variables, just like in matchVar, SLPJ p 94
-- No binding to do: they'll all be wildcards by now (done in tidy)
-- So simply drop the first column and carry on matching the rest.
matchVariables (_:vars) ty eqns = match vars ty (shiftEqns eqns)
matchVariables [] _ _ = panic "matchVariables"
matchBangs :: [Id] -> Type -> [EquationInfo] -> DsM MatchResult
-- Strip the bang off the first pattern of every equation, match the
-- result, then wrap the match so the scrutinee is evaluated first.
matchBangs (var:vars) ty eqns
  = do { match_result <- match (var:vars) ty $
                         map (decomposeFirstPat getBangPat) eqns
       ; return (mkEvalMatchResult var ty match_result) }
matchBangs [] _ _ = panic "matchBangs"
matchCoercion :: [Id] -> Type -> [EquationInfo] -> DsM MatchResult
-- Apply the coercion to the match variable and then match that
matchCoercion (var:vars) ty (eqns@(eqn1:_))
  = do { let CoPat co pat _ = firstPat eqn1
       ; let pat_ty' = hsPatType pat
         -- var' holds the coerced scrutinee; match the inner patterns
         -- against it instead of the original variable
       ; var' <- newUniqueId var pat_ty'
       ; match_result <- match (var':vars) ty $
                         map (decomposeFirstPat getCoPat) eqns
       ; rhs' <- dsHsWrapper co (Var var)
       ; return (mkCoLetMatchResult (NonRec var' rhs') match_result) }
matchCoercion _ _ _ = panic "matchCoercion"
matchView :: [Id] -> Type -> [EquationInfo] -> DsM MatchResult
-- Apply the view function to the match variable and then match that
matchView (var:vars) ty (eqns@(eqn1:_))
  = do { -- we could pass in the expr from the PgView,
         -- but this needs to extract the pat anyway
         -- to figure out the type of the fresh variable
         let ViewPat viewExpr (L _ pat) _ = firstPat eqn1
         -- do the rest of the compilation
       ; let pat_ty' = hsPatType pat
       ; var' <- newUniqueId var pat_ty'
       ; match_result <- match (var':vars) ty $
                         map (decomposeFirstPat getViewPat) eqns
         -- compile the view expressions
       ; viewExpr' <- dsLExpr viewExpr
       ; return (mkViewMatchResult var' viewExpr' var match_result) }
matchView _ _ _ = panic "matchView"
matchOverloadedList :: [Id] -> Type -> [EquationInfo] -> DsM MatchResult
-- Since overloaded list patterns are treated as view patterns,
-- the code is roughly the same as for matchView
matchOverloadedList (var:vars) ty (eqns@(eqn1:_))
  = do { let ListPat _ elt_ty (Just (_,e)) = firstPat eqn1
       ; var' <- newUniqueId var (mkListTy elt_ty)  -- we construct the overall type by hand
       ; match_result <- match (var':vars) ty $
                         map (decomposeFirstPat getOLPat) eqns
           -- getOLPat builds the pattern inside as a non-overloaded
           -- version of the overloaded list pattern
       ; e' <- dsExpr e
       ; return (mkViewMatchResult var' e' var match_result) }
matchOverloadedList _ _ _ = panic "matchOverloadedList"
-- decompose the first pattern and leave the rest alone
-- (the supplied selector strips one syntactic layer off that pattern)
decomposeFirstPat :: (Pat Id -> Pat Id) -> EquationInfo -> EquationInfo
decomposeFirstPat extractpat (eqn@(EqnInfo { eqn_pats = pat : pats }))
  = eqn { eqn_pats = extractpat pat : pats}
decomposeFirstPat _ _ = panic "decomposeFirstPat"
-- Selectors that strip one layer off a pattern; each one panics if
-- handed the wrong pattern form (callers guarantee the right form).
getCoPat, getBangPat, getViewPat, getOLPat :: Pat Id -> Pat Id
getCoPat (CoPat _ pat _) = pat
getCoPat _ = panic "getCoPat"
getBangPat (BangPat pat ) = unLoc pat
getBangPat _ = panic "getBangPat"
getViewPat (ViewPat _ pat _) = unLoc pat
getViewPat _ = panic "getViewPat"
-- getOLPat turns an overloaded list pattern into its plain-list form
getOLPat (ListPat pats ty (Just _)) = ListPat pats ty Nothing
getOLPat _ = panic "getOLPat"
{-
Note [Empty case alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The list of EquationInfo can be empty, arising from
case x of {} or \case {}
In that situation we desugar to
case x of { _ -> error "pattern match failure" }
The *desugarer* isn't certain whether there really should be no
alternatives, so it adds a default case, as it always does. A later
pass may remove it if it's inaccessible. (See also Note [Empty case
alternatives] in CoreSyn.)
We do *not* desugar simply to
error "empty case"
or some such, because 'x' might be bound to (error "hello"), in which
case we want to see that "hello" exception, not (error "empty case").
See also Note [Case elimination: lifted case] in Simplify.
************************************************************************
* *
Tidying patterns
* *
************************************************************************
Tidy up the leftmost pattern in an @EquationInfo@, given the variable @v@
which will be scrutinised. This means:
\begin{itemize}
\item
Replace variable patterns @x@ (@x /= v@) with the pattern @_@,
together with the binding @x = v@.
\item
Replace the `as' pattern @x@@p@ with the pattern p and a binding @x = v@.
\item
Removing lazy (irrefutable) patterns (you don't want to know...).
\item
Converting explicit tuple-, list-, and parallel-array-pats into ordinary
@ConPats@.
\item
Convert the literal pat "" to [].
\end{itemize}
The result of this tidying is that the column of patterns will include
{\em only}:
\begin{description}
\item[@WildPats@:]
The @VarPat@ information isn't needed any more after this.
\item[@ConPats@:]
@ListPats@, @TuplePats@, etc., are all converted into @ConPats@.
\item[@LitPats@ and @NPats@:]
@LitPats@/@NPats@ of ``known friendly types'' (Int, Char,
Float, Double, at least) are converted to unboxed form; e.g.,
\tr{(NPat (HsInt i) _ _)} is converted to:
\begin{verbatim}
(ConPat I# _ _ [LitPat (HsIntPrim i)])
\end{verbatim}
\end{description}
-}
tidyEqnInfo :: Id -> EquationInfo
            -> DsM (DsWrapper, EquationInfo)
        -- DsM'd because of internal call to dsLHsBinds
        --      and mkSelectorBinds.
        -- "tidy1" does the interesting stuff, looking at
        -- one pattern and fiddling the list of bindings.
        --
        -- POST CONDITION: head pattern in the EqnInfo is
        --      WildPat
        --      ConPat
        --      NPat
        --      LitPat
        --      NPlusKPat
        -- but no other

tidyEqnInfo _ (EqnInfo { eqn_pats = [] })
  = panic "tidyEqnInfo"

tidyEqnInfo v eqn@(EqnInfo { eqn_pats = pat : pats })
  = do { (wrap, pat') <- tidy1 v pat
         -- Dropped a redundant one-expression 'do' that used to wrap
         -- (pat' : pats); 'do e' is just 'e', so it had no effect.
       ; return (wrap, eqn { eqn_pats = pat' : pats }) }
tidy1 :: Id                  -- The Id being scrutinised
      -> Pat Id              -- The pattern against which it is to be matched
      -> DsM (DsWrapper,     -- Extra bindings to do before the match
              Pat Id)        -- Equivalent pattern

-------------------------------------------------------
--      (pat', mr') = tidy1 v pat mr
-- tidies the *outer level only* of pat, giving pat'
-- It eliminates many pattern forms (as-patterns, variable patterns,
-- list patterns, etc) yielding one of:
--      WildPat
--      ConPatOut
--      LitPat
--      NPat
--      NPlusKPat

-- Parens and (checked) signatures are transparent: tidy what's inside
tidy1 v (ParPat pat)      = tidy1 v (unLoc pat)
tidy1 v (SigPatOut pat _) = tidy1 v (unLoc pat)
tidy1 _ (WildPat ty)      = return (idDsWrapper, WildPat ty)
tidy1 v (BangPat (L l p)) = tidy_bang_pat v l p

        -- case v of { x -> mr[] }
        -- = case v of { _ -> let x=v in mr[] }
tidy1 v (VarPat (L _ var))
  = return (wrapBind var v, WildPat (idType var))

        -- case v of { x@p -> mr[] }
        -- = case v of { p -> let x=v in mr[] }
tidy1 v (AsPat (L _ var) pat)
  = do { (wrap, pat') <- tidy1 v (unLoc pat)
       ; return (wrapBind var v . wrap, pat') }

{- now, here we handle lazy patterns:
   tidy1 v ~p bs = (v, v1 = case v of p -> v1 :
                        v2 = case v of p -> v2 : ... : bs )
   where the v_i's are the binders in the pattern.
   ToDo: in "v_i = ... -> v_i", are the v_i's really the same thing?
   The case expr for v_i is just: match [v] [(p, [], \ x -> Var v_i)] any_expr
-}

tidy1 v (LazyPat pat)
  = do { (_,sel_prs) <- mkSelectorBinds False [] pat (Var v)
       ; let sel_binds = [NonRec b rhs | (b,rhs) <- sel_prs]
       ; return (mkCoreLets sel_binds, WildPat (idType v)) }

-- Explicit (non-overloaded) list patterns become nested cons ConPats
tidy1 _ (ListPat pats ty Nothing)
  = return (idDsWrapper, unLoc list_ConPat)
  where
    list_ConPat = foldr (\ x y -> mkPrefixConPat consDataCon [x, y] [ty])
                        (mkNilPat ty)
                        pats

-- Introduce fake parallel array constructors to be able to handle parallel
-- arrays with the existing machinery for constructor pattern
tidy1 _ (PArrPat pats ty)
  = return (idDsWrapper, unLoc parrConPat)
  where
    arity = length pats
    parrConPat = mkPrefixConPat (parrFakeCon arity) pats [ty]

tidy1 _ (TuplePat pats boxity tys)
  = return (idDsWrapper, unLoc tuple_ConPat)
  where
    arity = length pats
    tuple_ConPat = mkPrefixConPat (tupleDataCon boxity arity) pats tys

-- LitPats: we *might* be able to replace these w/ a simpler form
tidy1 _ (LitPat lit)
  = return (idDsWrapper, tidyLitPat lit)

-- NPats: we *might* be able to replace these w/ a simpler form
tidy1 _ (NPat (L _ lit) mb_neg eq)
  = return (idDsWrapper, tidyNPat tidyLitPat lit mb_neg eq)

-- Everything else goes through unchanged...
tidy1 _ non_interesting_pat
  = return (idDsWrapper, non_interesting_pat)
--------------------
tidy_bang_pat :: Id -> SrcSpan -> Pat Id -> DsM (DsWrapper, Pat Id)

-- Discard par/sig under a bang
tidy_bang_pat v _ (ParPat (L l p)) = tidy_bang_pat v l p
tidy_bang_pat v _ (SigPatOut (L l p) _) = tidy_bang_pat v l p

-- Push the bang-pattern inwards, in the hope that
-- it may disappear next time
tidy_bang_pat v l (AsPat v' p) = tidy1 v (AsPat v' (L l (BangPat p)))
tidy_bang_pat v l (CoPat w p t) = tidy1 v (CoPat w (BangPat (L l p)) t)

-- Discard bang around strict pattern
tidy_bang_pat v _ p@(LitPat {}) = tidy1 v p
tidy_bang_pat v _ p@(ListPat {}) = tidy1 v p
tidy_bang_pat v _ p@(TuplePat {}) = tidy1 v p
tidy_bang_pat v _ p@(PArrPat {}) = tidy1 v p

-- Data/newtype constructors
tidy_bang_pat v l p@(ConPatOut { pat_con = L _ (RealDataCon dc), pat_args = args })
  | isNewTyCon (dataConTyCon dc)  -- Newtypes: push bang inwards (Trac #9844)
  = tidy1 v (p { pat_args = push_bang_into_newtype_arg l args })
  | otherwise                     -- Data types: discard the bang
  = tidy1 v p

-------------------
-- Default case, leave the bang there:
--    VarPat,
--    LazyPat,
--    WildPat,
--    ViewPat,
--    pattern synonyms (ConPatOut with PatSynCon)
--    NPat,
--    NPlusKPat
--
-- For LazyPat, remember that it's semantically like a VarPat
--   i.e.  !(~p) is not like ~p, or p!  (Trac #8952)
--
-- NB: SigPatIn, ConPatIn should not happen

tidy_bang_pat _ l p = return (idDsWrapper, BangPat (L l p))
-------------------
push_bang_into_newtype_arg :: SrcSpan -> HsConPatDetails Id -> HsConPatDetails Id
-- See Note [Bang patterns and newtypes]
-- We are transforming   !(N p)   into   (N !p)
push_bang_into_newtype_arg l (PrefixCon (arg:args))
  = ASSERT( null args)   -- a newtype constructor has exactly one field
    PrefixCon [L l (BangPat arg)]
push_bang_into_newtype_arg l (RecCon rf)
  | HsRecFields { rec_flds = L lf fld : flds } <- rf
  , HsRecField { hsRecFieldArg = arg } <- fld
  = ASSERT( null flds)
    RecCon (rf { rec_flds = [L lf (fld { hsRecFieldArg = L l (BangPat arg) })] })
push_bang_into_newtype_arg _ cd
  = pprPanic "push_bang_into_newtype_arg" (pprConArgs cd)
{-
Note [Bang patterns and newtypes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For the pattern !(Just pat) we can discard the bang, because
the pattern is strict anyway. But for !(N pat), where
newtype NT = N Int
we definitely can't discard the bang. Trac #9844.
So what we do is to push the bang inwards, in the hope that it will
get discarded there. So we transform
!(N pat) into (N !pat)
\noindent
{\bf Previous @matchTwiddled@ stuff:}
Now we get to the only interesting part; note: there are choices for
translation [from Simon's notes]; translation~1:
\begin{verbatim}
deTwiddle [s,t] e
\end{verbatim}
returns
\begin{verbatim}
[ w = e,
s = case w of [s,t] -> s
t = case w of [s,t] -> t
]
\end{verbatim}
Here \tr{w} is a fresh variable, and the \tr{w}-binding prevents multiple
evaluation of \tr{e}. An alternative translation (No.~2):
\begin{verbatim}
[ w = case e of [s,t] -> (s,t)
s = case w of (s,t) -> s
t = case w of (s,t) -> t
]
\end{verbatim}
************************************************************************
* *
\subsubsection[improved-unmixing]{UNIMPLEMENTED idea for improved unmixing}
* *
************************************************************************
We might be able to optimise unmixing when confronted by
only-one-constructor-possible, of which tuples are the most notable
examples. Consider:
\begin{verbatim}
f (a,b,c) ... = ...
f d ... (e:f) = ...
f (g,h,i) ... = ...
f j ... = ...
\end{verbatim}
This definition would normally be unmixed into four equation blocks,
one per equation. But it could be unmixed into just one equation
block, because if the one equation matches (on the first column),
the others certainly will.
You have to be careful, though; the example
\begin{verbatim}
f j ... = ...
-------------------
f (a,b,c) ... = ...
f d ... (e:f) = ...
f (g,h,i) ... = ...
\end{verbatim}
{\em must} be broken into two blocks at the line shown; otherwise, you
are forcing unnecessary evaluation. In any case, the top-left pattern
always gives the cue. You could then unmix blocks into groups of...
\begin{description}
\item[all variables:]
As it is now.
\item[constructors or variables (mixed):]
Need to make sure the right names get bound for the variable patterns.
\item[literals or variables (mixed):]
Presumably just a variant on the constructor case (as it is now).
\end{description}
************************************************************************
* *
* matchWrapper: a convenient way to call @match@ *
* *
************************************************************************
\subsection[matchWrapper]{@matchWrapper@: a convenient interface to @match@}
Calls to @match@ often involve similar (non-trivial) work; that work
is collected here, in @matchWrapper@. This function takes as
arguments:
\begin{itemize}
\item
Typchecked @Matches@ (of a function definition, or a case or lambda
expression)---the main input;
\item
An error message to be inserted into any (runtime) pattern-matching
failure messages.
\end{itemize}
As results, @matchWrapper@ produces:
\begin{itemize}
\item
A list of variables (@Locals@) that the caller must ``promise'' to
bind to appropriate values; and
\item
a @CoreExpr@, the desugared output (main result).
\end{itemize}
The main actions of @matchWrapper@ include:
\begin{enumerate}
\item
Flatten the @[TypecheckedMatch]@ into a suitable list of
@EquationInfo@s.
\item
Create as many new variables as there are patterns in a pattern-list
(in any one of the @EquationInfo@s).
\item
Create a suitable ``if it fails'' expression---a call to @error@ using
the error-string input; the {\em type} of this fail value can be found
by examining one of the RHS expressions in one of the @EquationInfo@s.
\item
Call @match@ with all of this information!
\end{enumerate}
-}
matchWrapper :: HsMatchContext Name          -- For shadowing warning messages
             -> Maybe (LHsExpr Id)           -- The scrutinee, if we check a case expr
             -> MatchGroup Id (LHsExpr Id)   -- Matches being desugared
             -> DsM ([Id], CoreExpr)         -- Results
{-
There is one small problem with the Lambda Patterns, when somebody
writes something similar to:
\begin{verbatim}
(\ (x:xs) -> ...)
\end{verbatim}
they don't want a warning about incomplete patterns; that is done with
the flag @opt_WarnSimplePatterns@.
This problem also appears in the:
\begin{itemize}
\item @do@ patterns, but if the @do@ can fail
it creates another equation if the match can fail
(see @DsExpr.doDo@ function)
\item @let@ patterns, are treated by @matchSimply@
List Comprehension patterns are treated by @matchSimply@ also
\end{itemize}
We can't call @matchSimply@ with Lambda patterns,
due to the fact that lambda patterns can have more than
one pattern, and match simply only accepts one pattern.
JJQC 30-Nov-1997
-}
matchWrapper ctxt mb_scr (MG { mg_alts = L _ matches
                             , mg_arg_tys = arg_tys
                             , mg_res_ty = rhs_ty
                             , mg_origin = origin })
  = do { dflags <- getDynFlags
       ; locn <- getSrcSpanDs

         -- One fresh variable per pattern column
       ; new_vars <- case matches of
                       [] -> mapM newSysLocalDs arg_tys
                       (m:_) -> selectMatchVars (map unLoc (hsLMatchPats m))

       ; eqns_info <- mapM (mk_eqn_info new_vars) matches

         -- pattern match check warnings
       ; unless (isGenerated origin) $ do
           when (isAnyPmCheckEnabled dflags (DsMatchContext ctxt locn)) $ do
             -- Count the number of guards that can fail
             guards <- computeNoGuards matches

             let simplify = not (gopt Opt_FullGuardReasoning dflags)
                         && (guards > maximum_failing_guards)

             -- See Note [Type and Term Equality Propagation]
             addTmCsDs (genCaseTmCs1 mb_scr new_vars) $
               dsPmWarn dflags (DsMatchContext ctxt locn) $
                 checkMatches simplify new_vars matches

             when (not (gopt Opt_FullGuardReasoning dflags)
                    && wopt Opt_WarnTooManyGuards dflags
                    && guards > maximum_failing_guards)
               (warnManyGuards (DsMatchContext ctxt locn))

       ; result_expr <- handleWarnings $
                        matchEquations ctxt new_vars eqns_info rhs_ty
       ; return (new_vars, result_expr) }
  where
    mk_eqn_info vars (L _ (Match _ pats _ grhss))
      = do { dflags <- getDynFlags
           ; let upats = map (getMaybeStrictPat dflags) pats
                 dicts = toTcTypeBag (collectEvVarsPats upats) -- Only TcTyVars
           ; tm_cs <- genCaseTmCs2 mb_scr upats vars
           ; match_result <- addDictsDs dicts $ -- See Note [Type and Term Equality Propagation]
                             addTmCsDs tm_cs $  -- See Note [Type and Term Equality Propagation]
                             dsGRHSs ctxt upats grhss rhs_ty
           ; return (EqnInfo { eqn_pats = upats, eqn_rhs = match_result}) }

    -- Generated (compiler-produced) code never warns
    handleWarnings = if isGenerated origin
                     then discardWarningsDs
                     else id
matchEquations :: HsMatchContext Name
               -> [Id] -> [EquationInfo] -> Type
               -> DsM CoreExpr
-- Desugar a set of equations, supplying a pattern-match-failure
-- fallback expression of the right type.
matchEquations ctxt vars eqns_info rhs_ty
  = do { let error_doc = matchContextErrString ctxt
       ; match_result <- match vars rhs_ty eqns_info
       ; fail_expr <- mkErrorAppDs pAT_ERROR_ID rhs_ty error_doc
       ; extractMatchResult match_result fail_expr }
{-
************************************************************************
* *
\subsection[matchSimply]{@matchSimply@: match a single expression against a single pattern}
* *
************************************************************************
@mkSimpleMatch@ is a wrapper for @match@ which deals with the
situation where we want to match a single expression against a single
pattern. It returns an expression.
-}
matchSimply :: CoreExpr             -- Scrutinee
            -> HsMatchContext Name  -- Match kind
            -> LPat Id              -- Pattern it should match
            -> CoreExpr             -- Return this if it matches
            -> CoreExpr             -- Return this if it doesn't
            -> DsM CoreExpr
-- Do not warn about incomplete patterns; see matchSinglePat comments
matchSimply scrut hs_ctx pat result_expr fail_expr = do
    let
      match_result = cantFailMatchResult result_expr
      rhs_ty = exprType fail_expr
        -- Use exprType of fail_expr, because won't refine in the case of failure!
    match_result' <- matchSinglePat scrut hs_ctx pat rhs_ty match_result
    extractMatchResult match_result' fail_expr
matchSinglePat :: CoreExpr -> HsMatchContext Name -> LPat Id
               -> Type -> MatchResult -> DsM MatchResult
-- Do not warn about incomplete patterns
-- Used for things like [ e | pat <- stuff ], where
-- incomplete patterns are just fine

-- Scrutinee is already a variable: match against it directly
matchSinglePat (Var var) ctx pat ty match_result
  = do { dflags <- getDynFlags
       ; locn <- getSrcSpanDs
       ; let pat' = getMaybeStrictPat dflags pat
         -- pattern match check warnings
       ; dsPmWarn dflags (DsMatchContext ctx locn) (checkSingle var pat')
       ; match [var] ty
               [EqnInfo { eqn_pats = [pat'], eqn_rhs = match_result }] }

-- Otherwise bind the scrutinee to a fresh variable first
matchSinglePat scrut hs_ctx pat ty match_result
  = do { var <- selectSimpleMatchVarL pat
       ; match_result' <- matchSinglePat (Var var) hs_ctx pat ty match_result
       ; return (adjustMatchResult (bindNonRec var scrut) match_result') }
-- | Strip any bang annotation from the pattern; if one was present
-- (or Strict is on), re-wrap the result in an explicit 'BangPat'.
getMaybeStrictPat :: DynFlags -> LPat Id -> Pat Id
getMaybeStrictPat dflags pat
  | is_strict = BangPat pat'
  | otherwise = unLoc pat'
  where
    (is_strict, pat') = getUnBangedLPat dflags pat
{-
************************************************************************
* *
Pattern classification
* *
************************************************************************
-}
-- | Classification of the outermost pattern of an equation; equations
-- whose first patterns fall in the same group can be matched together.
data PatGroup
  = PgAny               -- Immediate match: variables, wildcards,
                        --                  lazy patterns
  | PgCon DataCon       -- Constructor patterns (incl list, tuple)
  | PgSyn PatSyn [Type] -- See Note [Pattern synonym groups]
  | PgLit Literal       -- Literal patterns
  | PgN Literal         -- Overloaded literals
  | PgNpK Literal       -- n+k patterns
  | PgBang              -- Bang patterns
  | PgCo Type           -- Coercion patterns; the type is the type
                        --   of the pattern *inside*
  | PgView (LHsExpr Id) -- view pattern (e -> p):
                        --   the LHsExpr is the expression e
           Type         -- the Type is the type of p (equivalently, the result type of e)
  | PgOverloadedList    -- Overloaded list patterns
groupEquations :: DynFlags -> [EquationInfo] -> [[(PatGroup, EquationInfo)]]
-- If the result is of form [g1, g2, g3],
-- (a) all the (pg,eq) pairs in g1 have the same pg
-- (b) none of the gi are empty
-- The ordering of equations is unchanged
groupEquations dflags eqns
  = runs same_gp [(patGroup dflags (firstPat eqn), eqn) | eqn <- eqns]
  where
    same_gp :: (PatGroup,EquationInfo) -> (PatGroup,EquationInfo) -> Bool
    (pg1,_) `same_gp` (pg2,_) = pg1 `sameGroup` pg2
subGroup :: Ord a => [(a, EquationInfo)] -> [[EquationInfo]]
-- Input is a particular group.  The result sub-groups the
-- equations by with particular constructor, literal etc they match.
-- Each sub-list in the result has the same PatGroup
-- See Note [Take care with pattern order]
subGroup group
  = map reverse $ Map.elems $ foldl add_eqn Map.empty group
  where
    -- Prepend the equation onto its key's bucket; buckets are kept in
    -- reverse order of appearance, hence the final 'map reverse'.
    -- (insertWith applies (++) as  new ++ old, i.e.  [eqn] ++ eqns)
    add_eqn bucket_map (key, eqn) = Map.insertWith (++) key [eqn] bucket_map
{- Note [Pattern synonym groups]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we see
f (P a) = e1
f (P b) = e2
...
where P is a pattern synonym, can we put (P a -> e1) and (P b -> e2) in the
same group? We can if P is a constructor, but /not/ if P is a pattern synonym.
Consider (Trac #11224)
-- readMaybe :: Read a => String -> Maybe a
pattern PRead :: Read a => () => a -> String
pattern PRead a <- (readMaybe -> Just a)
f (PRead (x::Int)) = e1
f (PRead (y::Bool)) = e2
This is all fine: we match the string by trying to read an Int; if that
fails we try to read a Bool. But clearly we can't combine the two into a single
match.
Conclusion: we can combine when we invoke PRead /at the same type/. Hence
in PgSyn we record the instantiating types, and use them in sameGroup.
Note [Take care with pattern order]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the subGroup function we must be very careful about pattern re-ordering,
Consider the patterns [ (True, Nothing), (False, x), (True, y) ]
Then in bringing together the patterns for True, we must not
swap the Nothing and y!
-}
sameGroup :: PatGroup -> PatGroup -> Bool
-- Same group means that a single case expression
-- or test will suffice to match both, *and* the order
-- of testing within the group is insignificant.
sameGroup PgAny PgAny = True
sameGroup PgBang PgBang = True
sameGroup (PgCon _) (PgCon _) = True    -- One case expression
sameGroup (PgSyn p1 t1) (PgSyn p2 t2) = p1==p2 && eqTypes t1 t2
                                        -- eqTypes: See Note [Pattern synonym groups]
sameGroup (PgLit _) (PgLit _) = True    -- One case expression
sameGroup (PgN l1) (PgN l2) = l1==l2    -- Order is significant
sameGroup (PgNpK l1) (PgNpK l2) = l1==l2 -- See Note [Grouping overloaded literal patterns]
sameGroup (PgCo t1) (PgCo t2) = t1 `eqType` t2
        -- CoPats are in the same group only if the type of the
        -- enclosed pattern is the same. The patterns outside the CoPat
        -- always have the same type, so this boils down to saying that
        -- the two coercions are identical.
sameGroup (PgView e1 t1) (PgView e2 t2) = viewLExprEq (e1,t1) (e2,t2)
        -- ViewPats are in the same group iff the expressions
        -- are "equal"---conservatively, we use syntactic equality
sameGroup _ _ = False
-- An approximation of syntactic equality used for determining when view
-- exprs are in the same group.
-- This function can always safely return false;
-- but doing so will result in the application of the view function being repeated.
--
-- Currently: compare applications of literals and variables
-- and anything else that we can do without involving other
-- HsSyn types in the recursion
--
-- NB we can't assume that the two view expressions have the same type. Consider
-- f (e1 -> True) = ...
-- f (e2 -> "hi") = ...
-- | Approximate syntactic equality of two view-pattern expressions;
-- see the commentary immediately above for the design constraints.
viewLExprEq :: (LHsExpr Id,Type) -> (LHsExpr Id,Type) -> Bool
viewLExprEq (e1,_) (e2,_) = lexp e1 e2
  where
    lexp :: LHsExpr Id -> LHsExpr Id -> Bool
    lexp e e' = exp (unLoc e) (unLoc e')

    ---------
    exp :: HsExpr Id -> HsExpr Id -> Bool
    -- real comparison is on HsExpr's
    -- strip parens
    exp (HsPar (L _ e)) e' = exp e e'
    exp e (HsPar (L _ e')) = exp e e'
    -- because the expressions do not necessarily have the same type,
    -- we have to compare the wrappers
    exp (HsWrap h e) (HsWrap h' e') = wrap h h' && exp e e'
    exp (HsVar i) (HsVar i') = i == i'
    -- the instance for IPName derives using the id, so this works if the
    -- above does
    exp (HsIPVar i) (HsIPVar i') = i == i'
    exp (HsOverLabel l) (HsOverLabel l') = l == l'
    exp (HsOverLit l) (HsOverLit l') =
        -- Overloaded lits are equal if they have the same type
        -- and the data is the same.
        -- this is coarser than comparing the SyntaxExpr's in l and l',
        -- which resolve the overloading (e.g., fromInteger 1),
        -- because these expressions get written as a bunch of different variables
        -- (presumably to improve sharing)
        eqType (overLitType l) (overLitType l') && l == l'
    exp (HsApp e1 e2) (HsApp e1' e2') = lexp e1 e1' && lexp e2 e2'
    -- the fixities have been straightened out by now, so it's safe
    -- to ignore them?
    exp (OpApp l o _ ri) (OpApp l' o' _ ri') =
        lexp l l' && lexp o o' && lexp ri ri'
    exp (NegApp e n) (NegApp e' n') = lexp e e' && exp n n'
    exp (SectionL e1 e2) (SectionL e1' e2') =
        lexp e1 e1' && lexp e2 e2'
    exp (SectionR e1 e2) (SectionR e1' e2') =
        lexp e1 e1' && lexp e2 e2'
    exp (ExplicitTuple es1 _) (ExplicitTuple es2 _) =
        eq_list tup_arg es1 es2
    exp (HsIf _ e e1 e2) (HsIf _ e' e1' e2') =
        lexp e e' && lexp e1 e1' && lexp e2 e2'

    -- Enhancement: could implement equality for more expressions
    --   if it seems useful
    -- But no need for HsLit, ExplicitList, ExplicitTuple,
    -- because they cannot be functions
    exp _ _ = False

    ---------
    tup_arg (L _ (Present e1)) (L _ (Present e2)) = lexp e1 e2
    tup_arg (L _ (Missing t1)) (L _ (Missing t2)) = eqType t1 t2
    tup_arg _ _ = False

    ---------
    wrap :: HsWrapper -> HsWrapper -> Bool
    -- Conservative, in that it demands that wrappers be
    -- syntactically identical and doesn't look under binders
    --
    -- Coarser notions of equality are possible
    -- (e.g., reassociating compositions,
    --        equating different ways of writing a coercion)
    wrap WpHole WpHole = True
    wrap (WpCompose w1 w2) (WpCompose w1' w2') = wrap w1 w1' && wrap w2 w2'
    wrap (WpFun w1 w2 _) (WpFun w1' w2' _) = wrap w1 w1' && wrap w2 w2'
    wrap (WpCast co) (WpCast co') = co `eqCoercion` co'
    wrap (WpEvApp et1) (WpEvApp et2) = et1 `ev_term` et2
    wrap (WpTyApp t) (WpTyApp t') = eqType t t'
    -- Enhancement: could implement equality for more wrappers
    --   if it seems useful (lams and lets)
    wrap _ _ = False

    ---------
    ev_term :: EvTerm -> EvTerm -> Bool
    ev_term (EvId a) (EvId b) = a==b
    ev_term (EvCoercion a) (EvCoercion b) = a `eqCoercion` b
    ev_term _ _ = False

    ---------
    eq_list :: (a->a->Bool) -> [a] -> [a] -> Bool
    eq_list _ [] [] = True
    eq_list _ [] (_:_) = False
    eq_list _ (_:_) [] = False
    eq_list eq (x:xs) (y:ys) = eq x y && eq_list eq xs ys
patGroup :: DynFlags -> Pat Id -> PatGroup
-- Classify the outermost pattern; panics on forms that 'tidy1'
-- should already have eliminated.
patGroup _ (ConPatOut { pat_con = L _ con
                      , pat_arg_tys = tys })
 | RealDataCon dcon <- con = PgCon dcon
 | PatSynCon psyn <- con = PgSyn psyn tys
patGroup _ (WildPat {}) = PgAny
patGroup _ (BangPat {}) = PgBang
patGroup _ (NPat (L _ olit) mb_neg _) = PgN (hsOverLitKey olit (isJust mb_neg))
patGroup _ (NPlusKPat _ (L _ olit) _ _) = PgNpK (hsOverLitKey olit False)
patGroup _ (CoPat _ p _) = PgCo (hsPatType p) -- Type of the inner pattern
patGroup _ (ViewPat expr p _) = PgView expr (hsPatType (unLoc p))
patGroup _ (ListPat _ _ (Just _)) = PgOverloadedList
patGroup dflags (LitPat lit) = PgLit (hsLitKey dflags lit)
patGroup _ pat = pprPanic "patGroup" (ppr pat)
{-
Note [Grouping overloaded literal patterns]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
WATCH OUT! Consider
f (n+1) = ...
f (n+2) = ...
f (n+1) = ...
We can't group the first and third together, because the second may match
the same thing as the first. Same goes for *overloaded* literal patterns
f 1 True = ...
f 2 False = ...
f 1 False = ...
If the first arg matches '1' but the second does not match 'True', we
cannot jump to the third equation! Because the same argument might
match '2'!
Hence we don't regard 1 and 2, or (n+1) and (n+2), as part of the same group.
-}
| gridaphobe/ghc | compiler/deSugar/Match.hs | bsd-3-clause | 40,083 | 12 | 21 | 10,631 | 6,852 | 3,582 | 3,270 | 381 | 28 |
{-# OPTIONS_GHC -Wall #-}
module BP.Ast where
import BP.Type
import Data.List(intercalate)
import qualified Text.PrettyPrint as PP
-- | Identifier names in the surface language.
type EName = String

-- | Convenience alias for an argument list.
type Terms = [Term]

-- | A function parameter, optionally annotated with a type.
data ParamWithOptionalType = Param EName (Maybe Type)

-- | Abstract syntax of terms.
data Term = Ident EName                                         -- ^ variable reference
          | Lambda EName Term                                   -- ^ one-argument lambda
          | Function EName [ParamWithOptionalType] Term (Maybe Type)
            -- ^ named function: parameters, body, optional return type
          | Apply Term Term                                     -- ^ one-argument application
          | Call Term Terms                                     -- ^ multi-argument call
          | Let EName Term Term                                 -- ^ @let v = def in body@
          | LetBinding EName Term (Maybe Type)                  -- ^ binding with optional type
          | LetRec EName Term Term                              -- ^ recursive let
-- | Render a parameter, appending @ : ty@ when a type annotation is present.
stringOfParam :: ParamWithOptionalType -> String
stringOfParam (Param name annot) =
  maybe name (\ty -> name ++ " : " ++ show ty) annot
-- | Pretty-print a term back to concrete syntax.
--
-- Fix: the 'LetBinding' case previously rendered the bound name and its
-- definition with no @\" = \"@ separator (producing e.g. @let xdef@),
-- unlike the 'Let' and 'LetRec' cases; the separator is now emitted.
stringOfTerm :: Term -> String
stringOfTerm t = case t of
  Ident n -> n
  Lambda v b -> "λ" ++ v ++ " → " ++ stringOfTerm b
  -- Named function: parameter list, body, and optional return type.
  Function name params body rtnType ->
    "ƒ " ++ name ++ "(" ++ intercalate ", " (map stringOfParam params) ++ ") → "
    ++ stringOfTerm body ++ case rtnType of
                              Just t' -> " : " ++ show t'
                              Nothing -> ""
  Apply fn arg -> "(" ++ stringOfTerm fn ++ " " ++ stringOfTerm arg ++ ")"
  Call fn args -> "(" ++ stringOfTerm fn ++ " " ++ intercalate ", " (map stringOfTerm args) ++ ")"
  Let v def body -> "let " ++ v ++ " = " ++ stringOfTerm def ++ " in " ++ stringOfTerm body
  LetBinding v def ty -> "let " ++ v ++ " = " ++ stringOfTerm def ++ case ty of
                           Just ty' -> " : " ++ show ty'
                           Nothing -> ""
  LetRec v def body -> "letrec " ++ v ++ " = " ++ stringOfTerm def ++ " in " ++ stringOfTerm body
-- | Show terms via the pretty-printer rather than a derived instance.
instance Show Term where
  showsPrec _ x = shows $ PP.text $ stringOfTerm x
| zjhmale/HMF | src/BP/Ast.hs | bsd-3-clause | 2,141 | 0 | 14 | 968 | 577 | 289 | 288 | 37 | 10 |
{-# OPTIONS_GHC -fno-warn-dodgy-exports #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE NoImplicitPrelude #-}
module Sound
(
-- * Reexports
-- $reexports
module Control.Monad
, module Data.Complex
, module Data.Int
-- * Input, output, and conversion
, module Sound.Io.Snd
, module Sound.Io
, module Sound.IoPtr
, module Sound.Sox
, module Sound.IoSox
, module Sound.Portaudio
-- * Abstraction ladder
, module Sound.Class
, module Sound.Pair
, module Sound.Compile
, module Sound.InfList
, module Sound.List
-- * Buffer
, module Sound.Buffer
, module Sound.Ptr
-- * Time, tempo
, module Sound.Time
, module Sound.Tempo
-- * Sampling
, module Sound.Sample
-- * Decibels, contours, envelopes, limiting, clamping
, module Sound.Amplitude
, module Sound.Ramp
-- * Wavetable
, module Sound.Table
-- * White noise
, module Sound.Random
-- * Fourier transform
, module Sound.Fourier
-- * Frequency modulation
, module Sound.Fm
-- * Integration
, module Sound.Int
-- * Instrument
, module Sound.Perform
-- * MIDI input
, module Sound.Midi
-- * Error handling, type hints
, module Sound.Hint
-- * Filters: zeros and poles
, module Sound.Filter
-- * Unclear
, module Sound.Function
-- * Failed experiments
, module Sound.Abstract
, module Sound.Endo
, module Sound.GenBody
, module Sound.GeneratorContinuation
, module Sound.IoFail
, module Sound.Stream
, module Sound.StreamVector
)
where
import Data.Complex
import Data.Int
import Control.Applicative
import Control.Monad
import Sound.Abstract
import Sound.Amplitude
import Sound.Buffer
import Sound.Class
import Sound.Compile
import Sound.Endo
import Sound.Filter
import Sound.Fourier
import Sound.Fm
import Sound.Function
import Sound.GenBody
import Sound.GeneratorContinuation
import Sound.Hint
import Sound.InfList
import Sound.Int
import Sound.Io
import Sound.Io.Snd ()
import Sound.IoFail
import Sound.IoPtr
import Sound.IoSox
import Sound.List ()
import Sound.Midi
import Sound.Pair
import Sound.Perform
import Sound.Portaudio ()
import Sound.Ptr
import Sound.Ramp
import Sound.Random
import Sound.Sample
import Sound.Sox hiding (Rate)
import Sound.Stream
import Sound.StreamVector
import Sound.Table
import Sound.Tempo
import Sound.Time
import qualified Prelude as P
{- $reexports
If you use this module unqualified, you have to hide Prelude imports to avoid name clashes.
The portable way to do that is by adding this import to your code:
@
import Prelude ()
@
This module replaces 'P..' and 'P.id' from "Prelude"
with '.' and 'id' from "Control.Category".
This module hides 'P.seq' from "Prelude".
This module reexports everything else from all modules listed here.
-}
| edom/sound | src/Sound.hs | bsd-3-clause | 2,856 | 0 | 5 | 599 | 475 | 313 | 162 | 83 | 0 |
{-# LANGUAGE CPP, DeriveFunctor, GADTs, PatternSynonyms #-}
-----------------------------------------------------------------------------
--
-- Pretty-printing of Cmm as C, suitable for feeding gcc
--
-- (c) The University of Glasgow 2004-2006
--
-- Print Cmm as real C, for -fvia-C
--
-- See wiki:commentary/compiler/backends/ppr-c
--
-- This is simpler than the old PprAbsC, because Cmm is "macro-expanded"
-- relative to the old AbstractC, and many oddities/decorations have
-- disappeared from the data type.
--
-- This code generator is only supported in unregisterised mode.
--
-----------------------------------------------------------------------------
module GHC.CmmToC (
writeC
) where
#include "HsVersions.h"
-- Cmm stuff
import GhcPrelude
import GHC.Cmm.BlockId
import GHC.Cmm.CLabel
import ForeignCall
import GHC.Cmm hiding (pprBBlock)
import GHC.Cmm.Ppr () -- For Outputable instances
import GHC.Cmm.Dataflow.Block
import GHC.Cmm.Dataflow.Collections
import GHC.Cmm.Dataflow.Graph
import GHC.Cmm.Utils
import GHC.Cmm.Switch
-- Utils
import CPrim
import DynFlags
import FastString
import Outputable
import GHC.Platform
import UniqSet
import UniqFM
import Unique
import Util
-- The rest
import Data.ByteString (ByteString)
import qualified Data.ByteString as BS
import Control.Monad.ST
import Data.Bits
import Data.Char
import Data.List
import Data.Map (Map)
import Data.Word
import System.IO
import qualified Data.Map as Map
import Control.Monad (ap)
import qualified Data.Array.Unsafe as U ( castSTUArray )
import Data.Array.ST
-- --------------------------------------------------------------------------
-- Top level
-- | Entry point: print a group of raw Cmm declarations as C to the handle.
writeC :: DynFlags -> Handle -> RawCmmGroup -> IO ()
writeC dflags handle cmm = printForC dflags handle (pprC cmm $$ blankLine)
-- --------------------------------------------------------------------------
-- Now do some real work
--
-- for fun, we could call cmmToCmm over the tops...
--
-- | Lay out each top-level Cmm declaration as C, separated by blank lines.
pprC :: RawCmmGroup -> SDoc
pprC tops = vcat $ intersperse blankLine $ map pprTop tops
--
-- top level procs
--
-- | Print one top-level Cmm declaration: either a procedure (info table,
-- extern declarations, function header, temporaries, then its blocks) or
-- a chunk of static data (word arrays and strings only).
pprTop :: RawCmmDecl -> SDoc
pprTop (CmmProc infos clbl _in_live_regs graph) =
    -- Emit the info table (if any) immediately before the entry code.
    (case mapLookup (g_entry graph) infos of
       Nothing -> empty
       Just (RawCmmStatics info_clbl info_dat) ->
           pprDataExterns info_dat $$
           pprWordArray info_is_in_rodata info_clbl info_dat) $$
    (vcat [
           blankLine,
           extern_decls,
           (if (externallyVisibleCLabel clbl)
                    then mkFN_ else mkIF_) (ppr clbl) <+> lbrace,
           nest 8 temp_decls,
           vcat (map pprBBlock blocks),
           rbrace ]
    )
  where
        -- info tables are always in .rodata
        info_is_in_rodata = True
        blocks = toBlockListEntryFirst graph
        (temp_decls, extern_decls) = pprTempAndExternDecls blocks

-- Chunks of static data.

-- We only handle (a) arrays of word-sized things and (b) strings.

pprTop (CmmData section (RawCmmStatics lbl [CmmString str])) =
  -- A string becomes a (possibly static, possibly const) C char array.
  pprExternDecl lbl $$
  hcat [
    pprLocalness lbl, pprConstness (isSecConstant section), text "char ", ppr lbl,
    text "[] = ", pprStringInCStyle str, semi
  ]

pprTop (CmmData section (RawCmmStatics lbl [CmmUninitialised size])) =
  -- Uninitialised data: a char array of the requested size, no initialiser.
  pprExternDecl lbl $$
  hcat [
    pprLocalness lbl, pprConstness (isSecConstant section), text "char ", ppr lbl,
    brackets (int size), semi
  ]

pprTop (CmmData section (RawCmmStatics lbl lits)) =
  -- General case: an array of word-sized literals.
  pprDataExterns lits $$
  pprWordArray (isSecConstant section) lbl lits
-- --------------------------------------------------------------------------
-- BasicBlocks are self-contained entities: they always end in a jump.
--
-- Like nativeGen/AsmCodeGen, we could probably reorder blocks to turn
-- as many jumps as possible into fall throughs.
--
-- | A basic block becomes a C label followed by its statements.  Blocks
-- always end in a jump, so each one is self-contained.
pprBBlock :: CmmBlock -> SDoc
pprBBlock block =
  nest 4 (pprBlockId (entryLabel block) <> colon) $$
  nest 8 (vcat (map pprStmt (blockToList nodes)) $$ pprStmt last)
 where
  (_, nodes, last) = blockSplit block
-- --------------------------------------------------------------------------
-- Info tables. Just arrays of words.
-- See codeGen/ClosureInfo, and nativeGen/PprMach
-- | Print a list of statics as a C @StgWord@ array, optionally @const@,
-- with explicit alignment (see Note [StgWord alignment] below).
pprWordArray :: Bool -> CLabel -> [CmmStatic] -> SDoc
pprWordArray is_ro lbl ds
  = sdocWithDynFlags $ \dflags ->
    -- TODO: align closures only
    pprExternDecl lbl $$
    hcat [ pprLocalness lbl, pprConstness is_ro, text "StgWord"
         , space, ppr lbl, text "[]"
         -- See Note [StgWord alignment]
         , pprAlignment (wordWidth dflags)
         , text "= {" ]
    $$ nest 8 (commafy (pprStatics dflags ds))
    $$ text "};"
-- | Emit a GCC @__attribute__((aligned(n)))@ annotation whose byte count
-- corresponds to the given width.
pprAlignment :: Width -> SDoc
pprAlignment words =
  hcat [ text "__attribute__((aligned("
       , int (widthInBytes words)
       , text ")))"
       ]
-- Note [StgWord alignment]
-- C codegen builds static closures as StgWord C arrays (pprWordArray).
-- Their real C type is 'StgClosure'. Macros like UNTAG_CLOSURE assume
-- pointers to 'StgClosure' are aligned at pointer size boundary:
-- 4 byte boundary on 32 systems
-- and 8 bytes on 64-bit systems
-- see TAG_MASK and TAG_BITS definition and usage.
--
-- It's a reasonable assumption also known as natural alignment.
-- Although some architectures have different alignment rules.
-- One of known exceptions is m68k (#11395, comment:16) where:
-- __alignof__(StgWord) == 2, sizeof(StgWord) == 4
--
-- Thus we explicitly increase alignment by using
-- __attribute__((aligned(4)))
-- declaration.
--
-- has to be static, if it isn't globally visible
--
-- | Labels that are not externally visible become C @static@ definitions;
-- visible ones get no storage-class prefix.
pprLocalness :: CLabel -> SDoc
pprLocalness lbl =
  if externallyVisibleCLabel lbl
    then empty
    else text "static "
-- | Prefix read-only data with C @const@.
pprConstness :: Bool -> SDoc
pprConstness is_ro = if is_ro then text "const " else empty
-- --------------------------------------------------------------------------
-- Statements.
--
-- | Emit C for a single Cmm node: assignments, stores, foreign calls,
-- branches and switches.  Informational nodes (entries, comments, ticks,
-- unwind info) produce no output.
pprStmt :: CmmNode e x -> SDoc

pprStmt stmt =
    sdocWithDynFlags $ \dflags ->
    case stmt of
    CmmEntry{}   -> empty
    CmmComment _ -> empty -- (hang (text "/*") 3 (ftext s)) $$ ptext (sLit "*/")
                          -- XXX if the string contains "*/", we need to fix it
                          -- XXX we probably want to emit these comments when
                          -- some debugging option is on.  They can get quite
                          -- large.

    CmmTick _ -> empty
    CmmUnwind{} -> empty

    CmmAssign dest src -> pprAssign dflags dest src

    CmmStore  dest src
        -- 64-bit stores on a 32-bit target go through the ASSIGN_* macros
        -- so GCC performs them as a single unit.
        | typeWidth rep == W64 && wordWidth dflags /= W64
        -> (if isFloatType rep then text "ASSIGN_DBL"
                               else ptext (sLit ("ASSIGN_Word64"))) <>
           parens (mkP_ <> pprExpr1 dest <> comma <> pprExpr src) <> semi

        | otherwise
        -> hsep [ pprExpr (CmmLoad dest rep), equals, pprExpr src <> semi ]
        where
          rep = cmmExprType dflags src

    CmmUnsafeForeignCall target@(ForeignTarget fn conv) results args ->
        fnCall
        where
        (res_hints, arg_hints) = foreignTargetHints target
        hresults = zip results res_hints
        hargs    = zip args arg_hints

        ForeignConvention cconv _ _ ret = conv

        cast_fn = parens (cCast (pprCFunType (char '*') cconv hresults hargs) fn)

        -- See wiki:commentary/compiler/backends/ppr-c#prototypes
        fnCall =
            case fn of
              CmmLit (CmmLabel lbl)
                | StdCallConv <- cconv ->
                    pprCall (ppr lbl) cconv hresults hargs
                    -- stdcall functions must be declared with
                    -- a function type, otherwise the C compiler
                    -- doesn't add the @n suffix to the label.  We
                    -- can't add the @n suffix ourselves, because
                    -- it isn't valid C.
                | CmmNeverReturns <- ret ->
                    pprCall cast_fn cconv hresults hargs <> semi
                | not (isMathFun lbl) ->
                    pprForeignCall (ppr lbl) cconv hresults hargs
              _ ->
                    pprCall cast_fn cconv hresults hargs <> semi
                    -- for a dynamic call, no declaration is necessary.

    -- Touch and prefetch are no-ops in the C backend.
    CmmUnsafeForeignCall (PrimTarget MO_Touch) _results _args -> empty
    CmmUnsafeForeignCall (PrimTarget (MO_Prefetch_Data _)) _results _args -> empty

    CmmUnsafeForeignCall target@(PrimTarget op) results args ->
        fn_call
      where
        cconv = CCallConv
        fn = pprCallishMachOp_for_C op

        (res_hints, arg_hints) = foreignTargetHints target
        hresults = zip results res_hints
        hargs    = zip args arg_hints

        fn_call
          -- The mem primops carry an extra alignment arg.
          -- We could maybe emit an alignment directive using this info.
          -- We also need to cast mem primops to prevent conflicts with GCC
          -- builtins (see bug #5967).
          | Just _align <- machOpMemcpyishAlign op
          = (text ";EFF_(" <> fn <> char ')' <> semi) $$
            pprForeignCall fn cconv hresults hargs
          | otherwise
          = pprCall fn cconv hresults hargs

    CmmBranch ident               -> pprBranch ident
    CmmCondBranch expr yes no _   -> pprCondBranch expr yes no
    CmmCall { cml_target = expr } -> mkJMP_ (pprExpr expr) <> semi
    CmmSwitch arg ids             -> sdocWithDynFlags $ \dflags ->
                                     pprSwitch dflags arg ids

    _other -> pprPanic "PprC.pprStmt" (ppr stmt)
-- | A value paired with its foreign-call hint.
type Hinted a = (a, ForeignHint)

-- | Call a foreign function through a locally-declared function pointer,
-- so the call site carries an accurate prototype.
-- See wiki:commentary/compiler/backends/ppr-c#prototypes.
pprForeignCall :: SDoc -> CCallConv -> [Hinted CmmFormal] -> [Hinted CmmActual]
               -> SDoc
pprForeignCall fn cconv results args = fn_call
  where
    fn_call = braces (
                 pprCFunType (char '*' <> text "ghcFunPtr") cconv results args <> semi
              $$ text "ghcFunPtr" <+> equals <+> cast_fn <> semi
              $$ pprCall (text "ghcFunPtr") cconv results args <> semi
             )
    cast_fn = parens (parens (pprCFunType (char '*') cconv results args) <> fn)
-- | Print the C type of a function: result type (void or exactly one
-- value), calling-convention attribute, and argument types.
pprCFunType :: SDoc -> CCallConv -> [Hinted CmmFormal] -> [Hinted CmmActual] -> SDoc
pprCFunType ppr_fn cconv ress args
  = sdocWithDynFlags $ \dflags ->
    let res_type [] = text "void"
        res_type [(one, hint)] = machRepHintCType (localRegType one) hint
        res_type _ = panic "pprCFunType: only void or 1 return value supported"

        arg_type (expr, hint) = machRepHintCType (cmmExprType dflags expr) hint
    in res_type ress <+>
       parens (ccallConvAttribute cconv <> ppr_fn) <>
       parens (commafy (map arg_type args))
-- ---------------------------------------------------------------------
-- unconditional branches
-- | Unconditional branch: @goto _label;@
pprBranch :: BlockId -> SDoc
pprBranch target = hsep [ text "goto", pprBlockId target <> semi ]
-- ---------------------------------------------------------------------
-- conditional branches to local labels
-- | Conditional branch to local labels:
-- @if (e) goto yes; else goto no;@
pprCondBranch :: CmmExpr -> BlockId -> BlockId -> SDoc
pprCondBranch cond yes no =
  hsep [ text "if"
       , parens (pprExpr cond)
       , text "goto"
       , pprBlockId yes <> semi
       , text "else goto"
       , pprBlockId no <> semi
       ]
-- ---------------------------------------------------------------------
-- a local table branch
--
-- we find the fall-through cases
--
-- | Print a local table branch as a C @switch@, emitting explicit
-- fall-through cases where several values share a target.
pprSwitch :: DynFlags -> CmmExpr -> SwitchTargets -> SDoc
pprSwitch dflags e ids
  = (hang (text "switch" <+> parens ( pprExpr e ) <+> lbrace)
        4 (vcat ( map caseify pairs ) $$ def)) $$ rbrace
  where
    (pairs, mbdef) = switchTargetsFallThrough ids

    -- fall through case
    caseify (ix:ixs, ident) = vcat (map do_fallthrough ixs) $$ final_branch ix
        where
        do_fallthrough ix =
                 hsep [ text "case" , pprHexVal ix (wordWidth dflags) <> colon ,
                        text "/* fall through */" ]
        final_branch ix =
                hsep [ text "case" , pprHexVal ix (wordWidth dflags) <> colon ,
                       text "goto" , (pprBlockId ident) <> semi ]

    caseify (_ , _ ) = panic "pprSwitch: switch with no cases!"

    def | Just l <- mbdef = text "default: goto" <+> pprBlockId l <> semi
        | otherwise       = empty
-- ---------------------------------------------------------------------
-- Expressions.
--
-- C Types: the invariant is that the C expression generated by
--
-- pprExpr e
--
-- has a type in C which is also given by
--
-- machRepCType (cmmExprType e)
--
-- (similar invariants apply to the rest of the pretty printer).
-- | Print a Cmm expression as C.  Invariant: the C expression produced
-- has the C type given by @machRepCType (cmmExprType e)@.
pprExpr :: CmmExpr -> SDoc
pprExpr e = case e of
    CmmLit lit      -> pprLit lit

    CmmLoad e ty    -> sdocWithDynFlags $ \dflags -> pprLoad dflags e ty
    CmmReg reg      -> pprCastReg reg
    CmmRegOff reg 0 -> pprCastReg reg

    -- CmmRegOff is an alias of MO_Add
    CmmRegOff reg i -> sdocWithDynFlags $ \dflags ->
                       pprCastReg reg <> char '+' <>
                       pprHexVal (fromIntegral i) (wordWidth dflags)

    CmmMachOp mop args -> pprMachOpApp mop args

    CmmStackSlot _ _   -> panic "pprExpr: CmmStackSlot not supported!"
-- | Print a memory load.  64-bit loads on a 32-bit target go through the
-- PK_* macros; loads from pointer-typed registers index directly through
-- the register to keep accesses word-aligned.
pprLoad :: DynFlags -> CmmExpr -> CmmType -> SDoc
pprLoad dflags e ty
  | width == W64, wordWidth dflags /= W64
  = (if isFloatType ty then text "PK_DBL"
                       else text "PK_Word64")
    <> parens (mkP_ <> pprExpr1 e)

  | otherwise
  = case e of
      CmmReg r | isPtrReg r && width == wordWidth dflags && not (isFloatType ty)
               -> char '*' <> pprAsPtrReg r

      CmmRegOff r 0 | isPtrReg r && width == wordWidth dflags && not (isFloatType ty)
                    -> char '*' <> pprAsPtrReg r

      CmmRegOff r off | isPtrReg r && width == wordWidth dflags
                      , off `rem` wORD_SIZE dflags == 0 && not (isFloatType ty)
      -- ToDo: check that the offset is a word multiple?
      --       (For tagging to work, I had to avoid unaligned loads. --ARY)
                      -> pprAsPtrReg r <> brackets (ppr (off `shiftR` wordShift dflags))

      _other -> cLoad e ty
  where
    width = typeWidth ty
-- | Like 'pprExpr', but parenthesise any expression that is not atomic;
-- literals and plain registers need no parentheses.
pprExpr1 :: CmmExpr -> SDoc
pprExpr1 expr = case expr of
  CmmLit lit -> pprLit1 lit
  CmmReg _   -> pprExpr expr
  other      -> parens (pprExpr other)
-- --------------------------------------------------------------------------
-- MachOp applications
-- | Print a MachOp application.  MulMayOflo becomes a call to the
-- @mulIntMayOflo@ helper; comparisons get a W_ cast (their C result
-- type is int).
pprMachOpApp :: MachOp -> [CmmExpr] -> SDoc

pprMachOpApp op args
  | isMulMayOfloOp op
  = text "mulIntMayOflo" <> parens (commafy (map pprExpr args))
  where isMulMayOfloOp (MO_U_MulMayOflo _) = True
        isMulMayOfloOp (MO_S_MulMayOflo _) = True
        isMulMayOfloOp _ = False

pprMachOpApp mop args
  | Just ty <- machOpNeedsCast mop
  = ty <> parens (pprMachOpApp' mop args)
  | otherwise
  = pprMachOpApp' mop args
-- | Comparisons in C have type 'int', but we want type W_ (this is what
-- resultRepOfMachOp says).  The other C operations inherit their type
-- from their operands, so no casting is required.
machOpNeedsCast :: MachOp -> Maybe SDoc
machOpNeedsCast mop =
  if isComparisonMachOp mop
    then Just mkW_
    else Nothing
-- | Print a MachOp application using C's infix/prefix operators,
-- casting arguments for signed and floating-point operations.
pprMachOpApp' :: MachOp -> [CmmExpr] -> SDoc
pprMachOpApp' mop args
 = case args of
    -- dyadic
    [x,y] -> pprArg x <+> pprMachOp_for_C mop <+> pprArg y

    -- unary
    [x]   -> pprMachOp_for_C mop <> parens (pprArg x)

    _     -> panic "PprC.pprMachOp : machop with wrong number of args"

  where
        -- Cast needed for signed integer ops
    pprArg e | signedOp    mop = sdocWithDynFlags $ \dflags ->
                                 cCast (machRep_S_CType (typeWidth (cmmExprType dflags e))) e
             | needsFCasts mop = sdocWithDynFlags $ \dflags ->
                                 cCast (machRep_F_CType (typeWidth (cmmExprType dflags e))) e
             | otherwise       = pprExpr1 e
    needsFCasts (MO_F_Eq _)   = False
    needsFCasts (MO_F_Ne _)   = False
    needsFCasts (MO_F_Neg _)  = True
    needsFCasts (MO_F_Quot _) = True
    needsFCasts mop  = floatComparison mop
-- --------------------------------------------------------------------------
-- Literals
-- | Print a Cmm literal as C: hex integers, float constants (including
-- the special INFINITY/NAN names from <math.h>), and label addresses
-- cast to W_.
pprLit :: CmmLit -> SDoc
pprLit lit = case lit of
    CmmInt i rep      -> pprHexVal i rep

    CmmFloat f w       -> parens (machRep_F_CType w) <> str
        where d = fromRational f :: Double
              str | isInfinite d && d < 0 = text "-INFINITY"
                  | isInfinite d          = text "INFINITY"
                  | isNaN d               = text "NAN"
                  | otherwise             = text (show d)
                -- these constants come from <math.h>
                -- see #1861

    CmmVec {} -> panic "PprC printing vector literal"

    CmmBlock bid       -> mkW_ <> pprCLabelAddr (infoTblLbl bid)
    CmmHighStackMark   -> panic "PprC printing high stack mark"
    CmmLabel clbl      -> mkW_ <> pprCLabelAddr clbl
    CmmLabelOff clbl i -> mkW_ <> pprCLabelAddr clbl <> char '+' <> int i
    CmmLabelDiffOff clbl1 _ i _   -- non-word widths not supported via C
    -- WARNING:
    --  * the lit must occur in the info table clbl2
    --  * clbl1 must be an SRT, a slow entry point or a large bitmap
        -> mkW_ <> pprCLabelAddr clbl1 <> char '+' <> int i

    where
        pprCLabelAddr lbl = char '&' <> ppr lbl
-- | Like 'pprLit', but parenthesise literal forms that are not atomic C
-- expressions: label offsets, label differences and float constants.
pprLit1 :: CmmLit -> SDoc
pprLit1 lit = case lit of
  CmmLabelOff _ _         -> parens (pprLit lit)
  CmmLabelDiffOff _ _ _ _ -> parens (pprLit lit)
  CmmFloat _ _            -> parens (pprLit lit)
  _                       -> pprLit lit
-- ---------------------------------------------------------------------------
-- Static data
-- | Print static data as a list of word-sized initialisers, splitting or
-- combining sub-word and double-word literals to match the target word
-- size and endianness.
pprStatics :: DynFlags -> [CmmStatic] -> [SDoc]
pprStatics _ [] = []
pprStatics dflags (CmmStaticLit (CmmFloat f W32) : rest)
  -- odd numbers of floats are padded to a word by mkVirtHeapOffsetsWithPadding
  | wORD_SIZE dflags == 8, CmmStaticLit (CmmInt 0 W32) : rest' <- rest
  = pprLit1 (floatToWord dflags f) : pprStatics dflags rest'

  -- adjacent floats aren't padded but combined into a single word
  | wORD_SIZE dflags == 8, CmmStaticLit (CmmFloat g W32) : rest' <- rest
  = pprLit1 (floatPairToWord dflags f g) : pprStatics dflags rest'

  | wORD_SIZE dflags == 4
  = pprLit1 (floatToWord dflags f) : pprStatics dflags rest

  | otherwise
  = pprPanic "pprStatics: float" (vcat (map ppr' rest))
    where ppr' (CmmStaticLit l) = sdocWithDynFlags $ \dflags ->
                                  ppr (cmmLitType dflags l)
          ppr' _other           = text "bad static!"

pprStatics dflags (CmmStaticLit (CmmFloat f W64) : rest)
  = map pprLit1 (doubleToWords dflags f) ++ pprStatics dflags rest

pprStatics dflags (CmmStaticLit (CmmInt i W64) : rest)
  -- a 64-bit int on a 32-bit target is split into two words,
  -- most-significant first on big-endian targets
  | wordWidth dflags == W32
  = if wORDS_BIGENDIAN dflags
    then pprStatics dflags (CmmStaticLit (CmmInt q W32) :
                            CmmStaticLit (CmmInt r W32) : rest)
    else pprStatics dflags (CmmStaticLit (CmmInt r W32) :
                            CmmStaticLit (CmmInt q W32) : rest)
  where r = i .&. 0xffffffff
        q = i `shiftR` 32

pprStatics dflags (CmmStaticLit (CmmInt a W32) :
                   CmmStaticLit (CmmInt b W32) : rest)
  -- two 32-bit ints on a 64-bit target are packed into one word
  | wordWidth dflags == W64
  = if wORDS_BIGENDIAN dflags
    then pprStatics dflags (CmmStaticLit (CmmInt ((shiftL a 32) .|. b) W64) :
                            rest)
    else pprStatics dflags (CmmStaticLit (CmmInt ((shiftL b 32) .|. a) W64) :
                            rest)

pprStatics dflags (CmmStaticLit (CmmInt a W16) :
                   CmmStaticLit (CmmInt b W16) : rest)
  -- two 16-bit ints on a 32-bit target are packed into one word
  | wordWidth dflags == W32
  = if wORDS_BIGENDIAN dflags
    then pprStatics dflags (CmmStaticLit (CmmInt ((shiftL a 16) .|. b) W32) :
                            rest)
    else pprStatics dflags (CmmStaticLit (CmmInt ((shiftL b 16) .|. a) W32) :
                            rest)

pprStatics dflags (CmmStaticLit (CmmInt _ w) : _)
  | w /= wordWidth dflags
  = pprPanic "pprStatics: cannot emit a non-word-sized static literal" (ppr w)

pprStatics dflags (CmmStaticLit lit : rest)
  = pprLit1 lit : pprStatics dflags rest

pprStatics _ (other : _)
  = pprPanic "pprStatics: other" (pprStatic other)
-- | Print one static datum (used by 'pprStatics' and for panic messages).
pprStatic :: CmmStatic -> SDoc
pprStatic s = case s of

    CmmStaticLit lit   -> nest 4 (pprLit lit)
    CmmUninitialised i -> nest 4 (mkC_ <> brackets (int i))

    -- these should be inlined, like the old .hc
    CmmString s'       -> nest 4 (mkW_ <> parens(pprStringInCStyle s'))
-- ---------------------------------------------------------------------------
-- Block Ids
-- | Local C label for a basic block: an underscore followed by the
-- block's unique.
pprBlockId :: BlockId -> SDoc
pprBlockId blk = hcat [ char '_', ppr (getUnique blk) ]
-- --------------------------------------------------------------------------
-- Print a MachOp in a way suitable for emitting via C.
--
-- | Map a MachOp to the C operator (or cast) that implements it.
-- Vector and MulMayOflo operations must have been handled earlier
-- (see 'pprMachOpApp') and panic here.
pprMachOp_for_C :: MachOp -> SDoc

pprMachOp_for_C mop = case mop of

        -- Integer operations
        MO_Add          _ -> char '+'
        MO_Sub          _ -> char '-'
        MO_Eq           _ -> text "=="
        MO_Ne           _ -> text "!="
        MO_Mul          _ -> char '*'

        MO_S_Quot       _ -> char '/'
        MO_S_Rem        _ -> char '%'
        MO_S_Neg        _ -> char '-'

        MO_U_Quot       _ -> char '/'
        MO_U_Rem        _ -> char '%'

        -- & Floating-point operations
        MO_F_Add        _ -> char '+'
        MO_F_Sub        _ -> char '-'
        MO_F_Neg        _ -> char '-'
        MO_F_Mul        _ -> char '*'
        MO_F_Quot       _ -> char '/'

        -- Signed comparisons
        MO_S_Ge         _ -> text ">="
        MO_S_Le         _ -> text "<="
        MO_S_Gt         _ -> char '>'
        MO_S_Lt         _ -> char '<'

        -- & Unsigned comparisons
        MO_U_Ge         _ -> text ">="
        MO_U_Le         _ -> text "<="
        MO_U_Gt         _ -> char '>'
        MO_U_Lt         _ -> char '<'

        -- & Floating-point comparisons
        MO_F_Eq         _ -> text "=="
        MO_F_Ne         _ -> text "!="
        MO_F_Ge         _ -> text ">="
        MO_F_Le         _ -> text "<="
        MO_F_Gt         _ -> char '>'
        MO_F_Lt         _ -> char '<'

        -- Bitwise operations.  Not all of these may be supported at all
        -- sizes, and only integral MachReps are valid.
        MO_And          _ -> char '&'
        MO_Or           _ -> char '|'
        MO_Xor          _ -> char '^'
        MO_Not          _ -> char '~'
        MO_Shl          _ -> text "<<"
        MO_U_Shr        _ -> text ">>" -- unsigned shift right
        MO_S_Shr        _ -> text ">>" -- signed shift right

        -- Conversions.  Some of these will be NOPs, but never those that convert
        -- between ints and floats.
        -- Floating-point conversions use the signed variant.
        -- We won't know to generate (void*) casts here, but maybe from
        -- context elsewhere

        -- noop casts
        MO_UU_Conv from to | from == to -> empty
        MO_UU_Conv _from to -> parens (machRep_U_CType to)

        MO_SS_Conv from to | from == to -> empty
        MO_SS_Conv _from to -> parens (machRep_S_CType to)

        MO_XX_Conv from to | from == to -> empty
        MO_XX_Conv _from to -> parens (machRep_U_CType to)

        MO_FF_Conv from to | from == to -> empty
        MO_FF_Conv _from to -> parens (machRep_F_CType to)

        MO_SF_Conv _from to -> parens (machRep_F_CType to)
        MO_FS_Conv _from to -> parens (machRep_S_CType to)

        MO_S_MulMayOflo _ -> pprTrace "offending mop:"
                                (text "MO_S_MulMayOflo")
                                (panic $ "PprC.pprMachOp_for_C: MO_S_MulMayOflo"
                                      ++ " should have been handled earlier!")
        MO_U_MulMayOflo _ -> pprTrace "offending mop:"
                                (text "MO_U_MulMayOflo")
                                (panic $ "PprC.pprMachOp_for_C: MO_U_MulMayOflo"
                                      ++ " should have been handled earlier!")

        MO_V_Insert {}    -> pprTrace "offending mop:"
                                (text "MO_V_Insert")
                                (panic $ "PprC.pprMachOp_for_C: MO_V_Insert"
                                      ++ " should have been handled earlier!")
        MO_V_Extract {}   -> pprTrace "offending mop:"
                                (text "MO_V_Extract")
                                (panic $ "PprC.pprMachOp_for_C: MO_V_Extract"
                                      ++ " should have been handled earlier!")

        MO_V_Add {}       -> pprTrace "offending mop:"
                                (text "MO_V_Add")
                                (panic $ "PprC.pprMachOp_for_C: MO_V_Add"
                                      ++ " should have been handled earlier!")
        MO_V_Sub {}       -> pprTrace "offending mop:"
                                (text "MO_V_Sub")
                                (panic $ "PprC.pprMachOp_for_C: MO_V_Sub"
                                      ++ " should have been handled earlier!")
        MO_V_Mul {}       -> pprTrace "offending mop:"
                                (text "MO_V_Mul")
                                (panic $ "PprC.pprMachOp_for_C: MO_V_Mul"
                                      ++ " should have been handled earlier!")

        MO_VS_Quot {}     -> pprTrace "offending mop:"
                                (text "MO_VS_Quot")
                                (panic $ "PprC.pprMachOp_for_C: MO_VS_Quot"
                                      ++ " should have been handled earlier!")
        MO_VS_Rem {}      -> pprTrace "offending mop:"
                                (text "MO_VS_Rem")
                                (panic $ "PprC.pprMachOp_for_C: MO_VS_Rem"
                                      ++ " should have been handled earlier!")
        MO_VS_Neg {}      -> pprTrace "offending mop:"
                                (text "MO_VS_Neg")
                                (panic $ "PprC.pprMachOp_for_C: MO_VS_Neg"
                                      ++ " should have been handled earlier!")

        MO_VU_Quot {}     -> pprTrace "offending mop:"
                                (text "MO_VU_Quot")
                                (panic $ "PprC.pprMachOp_for_C: MO_VU_Quot"
                                      ++ " should have been handled earlier!")
        MO_VU_Rem {}      -> pprTrace "offending mop:"
                                (text "MO_VU_Rem")
                                (panic $ "PprC.pprMachOp_for_C: MO_VU_Rem"
                                      ++ " should have been handled earlier!")

        MO_VF_Insert {}   -> pprTrace "offending mop:"
                                (text "MO_VF_Insert")
                                (panic $ "PprC.pprMachOp_for_C: MO_VF_Insert"
                                      ++ " should have been handled earlier!")
        MO_VF_Extract {}  -> pprTrace "offending mop:"
                                (text "MO_VF_Extract")
                                (panic $ "PprC.pprMachOp_for_C: MO_VF_Extract"
                                      ++ " should have been handled earlier!")

        MO_VF_Add {}      -> pprTrace "offending mop:"
                                (text "MO_VF_Add")
                                (panic $ "PprC.pprMachOp_for_C: MO_VF_Add"
                                      ++ " should have been handled earlier!")
        MO_VF_Sub {}      -> pprTrace "offending mop:"
                                (text "MO_VF_Sub")
                                (panic $ "PprC.pprMachOp_for_C: MO_VF_Sub"
                                      ++ " should have been handled earlier!")
        MO_VF_Neg {}      -> pprTrace "offending mop:"
                                (text "MO_VF_Neg")
                                (panic $ "PprC.pprMachOp_for_C: MO_VF_Neg"
                                      ++ " should have been handled earlier!")
        MO_VF_Mul {}      -> pprTrace "offending mop:"
                                (text "MO_VF_Mul")
                                (panic $ "PprC.pprMachOp_for_C: MO_VF_Mul"
                                      ++ " should have been handled earlier!")
        MO_VF_Quot {}     -> pprTrace "offending mop:"
                                (text "MO_VF_Quot")
                                (panic $ "PprC.pprMachOp_for_C: MO_VF_Quot"
                                      ++ " should have been handled earlier!")

        MO_AlignmentCheck {} -> panic "-falignment-santisation not supported by unregisterised backend"
-- | True when the argument type(s) of the MachOp are signed integers.
signedOp :: MachOp -> Bool
signedOp op = case op of
  MO_S_Quot _    -> True
  MO_S_Rem _     -> True
  MO_S_Neg _     -> True
  MO_S_Ge _      -> True
  MO_S_Le _      -> True
  MO_S_Gt _      -> True
  MO_S_Lt _      -> True
  MO_S_Shr _     -> True
  MO_SS_Conv _ _ -> True
  MO_SF_Conv _ _ -> True
  _              -> False
-- | True when the MachOp is a comparison between floating-point arguments.
floatComparison :: MachOp -> Bool
floatComparison op = case op of
  MO_F_Eq _ -> True
  MO_F_Ne _ -> True
  MO_F_Ge _ -> True
  MO_F_Le _ -> True
  MO_F_Gt _ -> True
  MO_F_Lt _ -> True
  _         -> False
-- ---------------------------------------------------------------------
-- tend to be implemented by foreign calls
-- | Map a callish MachOp to the name of the C function (libm routine,
-- barrier, mem primop, or RTS helper) that implements it.  Operations
-- with multiple results are not supported by this backend and panic.
pprCallishMachOp_for_C :: CallishMachOp -> SDoc

pprCallishMachOp_for_C mop
    = case mop of
        MO_F64_Pwr      -> text "pow"
        MO_F64_Sin      -> text "sin"
        MO_F64_Cos      -> text "cos"
        MO_F64_Tan      -> text "tan"
        MO_F64_Sinh     -> text "sinh"
        MO_F64_Cosh     -> text "cosh"
        MO_F64_Tanh     -> text "tanh"
        MO_F64_Asin     -> text "asin"
        MO_F64_Acos     -> text "acos"
        MO_F64_Atanh    -> text "atanh"
        MO_F64_Asinh    -> text "asinh"
        MO_F64_Acosh    -> text "acosh"
        MO_F64_Atan     -> text "atan"
        MO_F64_Log      -> text "log"
        MO_F64_Log1P    -> text "log1p"
        MO_F64_Exp      -> text "exp"
        MO_F64_ExpM1    -> text "expm1"
        MO_F64_Sqrt     -> text "sqrt"
        MO_F64_Fabs     -> text "fabs"
        MO_F32_Pwr      -> text "powf"
        MO_F32_Sin      -> text "sinf"
        MO_F32_Cos      -> text "cosf"
        MO_F32_Tan      -> text "tanf"
        MO_F32_Sinh     -> text "sinhf"
        MO_F32_Cosh     -> text "coshf"
        MO_F32_Tanh     -> text "tanhf"
        MO_F32_Asin     -> text "asinf"
        MO_F32_Acos     -> text "acosf"
        MO_F32_Atan     -> text "atanf"
        MO_F32_Asinh    -> text "asinhf"
        MO_F32_Acosh    -> text "acoshf"
        MO_F32_Atanh    -> text "atanhf"
        MO_F32_Log      -> text "logf"
        MO_F32_Log1P    -> text "log1pf"
        MO_F32_Exp      -> text "expf"
        MO_F32_ExpM1    -> text "expm1f"
        MO_F32_Sqrt     -> text "sqrtf"
        MO_F32_Fabs     -> text "fabsf"
        MO_ReadBarrier  -> text "load_load_barrier"
        MO_WriteBarrier -> text "write_barrier"
        MO_Memcpy _     -> text "memcpy"
        MO_Memset _     -> text "memset"
        MO_Memmove _    -> text "memmove"
        MO_Memcmp _     -> text "memcmp"

        (MO_BSwap w)    -> ptext (sLit $ bSwapLabel w)
        (MO_BRev w)     -> ptext (sLit $ bRevLabel w)
        (MO_PopCnt w)   -> ptext (sLit $ popCntLabel w)
        (MO_Pext w)     -> ptext (sLit $ pextLabel w)
        (MO_Pdep w)     -> ptext (sLit $ pdepLabel w)
        (MO_Clz w)      -> ptext (sLit $ clzLabel w)
        (MO_Ctz w)      -> ptext (sLit $ ctzLabel w)
        (MO_AtomicRMW w amop) -> ptext (sLit $ atomicRMWLabel w amop)
        (MO_Cmpxchg w)  -> ptext (sLit $ cmpxchgLabel w)
        (MO_AtomicRead w)  -> ptext (sLit $ atomicReadLabel w)
        (MO_AtomicWrite w) -> ptext (sLit $ atomicWriteLabel w)
        (MO_UF_Conv w)  -> ptext (sLit $ word2FloatLabel w)

        MO_S_Mul2     {} -> unsupported
        MO_S_QuotRem  {} -> unsupported
        MO_U_QuotRem  {} -> unsupported
        MO_U_QuotRem2 {} -> unsupported
        MO_Add2       {} -> unsupported
        MO_AddWordC   {} -> unsupported
        MO_SubWordC   {} -> unsupported
        MO_AddIntC    {} -> unsupported
        MO_SubIntC    {} -> unsupported
        MO_U_Mul2     {} -> unsupported
        MO_Touch         -> unsupported
        (MO_Prefetch_Data _ ) -> unsupported
        --- we could support prefetch via "__builtin_prefetch"
        --- Not adding it for now
    where unsupported = panic ("pprCallishMachOp_for_C: " ++ show mop
                            ++ " not supported!")
-- ---------------------------------------------------------------------
-- Useful #defines
--
-- | Wrap code in the JMP_ / FN_ / IF_ macros (see includes/Stg.h).
mkJMP_, mkFN_, mkIF_ :: SDoc -> SDoc
mkJMP_ i = text "JMP_" <> parens i
mkFN_  i = text "FN_"  <> parens i -- externally visible function
mkIF_  i = text "IF_"  <> parens i -- locally visible
-- from includes/Stg.h
--
-- | Cast macros from includes/Stg.h.
mkC_,mkW_,mkP_ :: SDoc
mkC_  = text "(C_)"        -- StgChar
mkW_  = text "(W_)"        -- StgWord
mkP_  = text "(P_)"        -- StgWord*
-- ---------------------------------------------------------------------
--
-- Assignments
--
-- Generating assignments is what we're all about, here
--
-- | Print a register assignment.  Pointer-typed registers assign through
-- their '.p' field; otherwise the rhs is cast to match the destination,
-- since casting the lvalue elicits a warning from new GCC versions (3.4+).
pprAssign :: DynFlags -> CmmReg -> CmmExpr -> SDoc

-- dest is a reg, rhs is a reg
pprAssign _ r1 (CmmReg r2)
   | isPtrReg r1 && isPtrReg r2
   = hcat [ pprAsPtrReg r1, equals, pprAsPtrReg r2, semi ]

-- dest is a reg, rhs is a CmmRegOff
pprAssign dflags r1 (CmmRegOff r2 off)
   | isPtrReg r1 && isPtrReg r2 && (off `rem` wORD_SIZE dflags == 0)
   = hcat [ pprAsPtrReg r1, equals, pprAsPtrReg r2, op, int off', semi ]
  where
        -- offset is in bytes; pointer arithmetic on '.p' is in words
        off1 = off `shiftR` wordShift dflags

        (op,off') | off >= 0  = (char '+', off1)
                  | otherwise = (char '-', -off1)

-- dest is a reg, rhs is anything.
-- We can't cast the lvalue, so we have to cast the rhs if necessary.  Casting
-- the lvalue elicits a warning from new GCC versions (3.4+).
pprAssign _ r1 r2
  | isFixedPtrReg r1             = mkAssign (mkP_ <> pprExpr1 r2)
  | Just ty <- strangeRegType r1 = mkAssign (parens ty <> pprExpr1 r2)
  | otherwise                    = mkAssign (pprExpr r2)
    where mkAssign x = if r1 == CmmGlobal BaseReg
                       then text "ASSIGN_BaseReg" <> parens x <> semi
                       else pprReg r1 <> text " = " <> x <> semi
-- ---------------------------------------------------------------------
-- Registers
-- | Print a register, casting it to W_ when its declared C type differs
-- from its Cmm type.
pprCastReg :: CmmReg -> SDoc
pprCastReg reg =
  if isStrangeTypeReg reg
    then mkW_ <> pprReg reg
    else pprReg reg
-- | True if @pprReg reg@ yields an expression of C type StgPtr.  We need
-- to take care with pointer arithmetic on registers of that type.
isFixedPtrReg :: CmmReg -> Bool
isFixedPtrReg reg = case reg of
  CmmLocal _  -> False
  CmmGlobal r -> isFixedPtrGlobalReg r
-- True if (pprAsPtrReg reg) will give an expression with type StgPtr
-- JD: THIS IS HORRIBLE AND SHOULD BE RENAMED, AT THE VERY LEAST.
-- THE GARBAGE WITH THE VNonGcPtr HELPS MATCH THE OLD CODE GENERATOR'S OUTPUT;
-- I'M NOT SURE IF IT SHOULD REALLY STAY THAT WAY.
isPtrReg :: CmmReg -> Bool
isPtrReg (CmmLocal _) = False
isPtrReg (CmmGlobal (VanillaReg _ VGcPtr)) = True -- if we print via pprAsPtrReg
isPtrReg (CmmGlobal (VanillaReg _ VNonGcPtr)) = False -- if we print via pprAsPtrReg
isPtrReg (CmmGlobal reg) = isFixedPtrGlobalReg reg
-- True if this global reg has type StgPtr
isFixedPtrGlobalReg :: GlobalReg -> Bool
isFixedPtrGlobalReg greg = case greg of
  Sp    -> True
  Hp    -> True
  HpLim -> True
  SpLim -> True
  _     -> False
-- True if in C this register doesn't have the type given by
-- (machRepCType (cmmRegType reg)), so it has to be cast.
isStrangeTypeReg :: CmmReg -> Bool
isStrangeTypeReg (CmmLocal _)  = False
isStrangeTypeReg (CmmGlobal g) = isStrangeTypeGlobal g

-- | Global registers whose C declaration uses a struct pointer (or
-- otherwise non-word) type; see 'strangeRegType' for the exact types.
isStrangeTypeGlobal :: GlobalReg -> Bool
isStrangeTypeGlobal CCCS           = True
isStrangeTypeGlobal CurrentTSO     = True
isStrangeTypeGlobal CurrentNursery = True
isStrangeTypeGlobal BaseReg        = True
isStrangeTypeGlobal r              = isFixedPtrGlobalReg r
-- | The C type to cast to for "strange" global registers, if any.
strangeRegType :: CmmReg -> Maybe SDoc
strangeRegType (CmmGlobal CCCS) = Just (text "struct CostCentreStack_ *")
strangeRegType (CmmGlobal CurrentTSO) = Just (text "struct StgTSO_ *")
strangeRegType (CmmGlobal CurrentNursery) = Just (text "struct bdescr_ *")
strangeRegType (CmmGlobal BaseReg) = Just (text "struct StgRegTable_ *")
strangeRegType _ = Nothing
-- pprReg just prints the register name.
--
pprReg :: CmmReg -> SDoc
pprReg (CmmLocal local)   = pprLocalReg local
pprReg (CmmGlobal global) = pprGlobalReg global
-- | Print a vanilla register via its .p (StgPtr) field; any other
-- register falls through to 'pprReg'.  WARN is a CPP macro from the
-- GHC utils headers, triggered when a non-GC-pointer register is
-- printed as a pointer.
pprAsPtrReg :: CmmReg -> SDoc
pprAsPtrReg (CmmGlobal (VanillaReg n gcp))
  = WARN( gcp /= VGcPtr, ppr n ) char 'R' <> int n <> text ".p"
pprAsPtrReg other_reg = pprReg other_reg
-- | Print a global register by its C name in the RTS register table.
pprGlobalReg :: GlobalReg -> SDoc
pprGlobalReg gr = case gr of
    VanillaReg n _ -> char 'R' <> int n <> text ".w"
        -- pprGlobalReg prints a VanillaReg as a .w regardless
        -- Example: R1.w = R1.w & (-0x8UL);
        --          JMP_(*R1.p);
    FloatReg n  -> char 'F' <> int n
    DoubleReg n -> char 'D' <> int n
    LongReg n   -> char 'L' <> int n
    Sp    -> text "Sp"
    SpLim -> text "SpLim"
    Hp    -> text "Hp"
    HpLim -> text "HpLim"
    CCCS  -> text "CCCS"
    CurrentTSO -> text "CurrentTSO"
    CurrentNursery -> text "CurrentNursery"
    HpAlloc -> text "HpAlloc"
    BaseReg -> text "BaseReg"
    EagerBlackholeInfo -> text "stg_EAGER_BLACKHOLE_info"
    GCEnter1 -> text "stg_gc_enter_1"
    GCFun    -> text "stg_gc_fun"
    other    -> panic $ "pprGlobalReg: Unsupported register: " ++ show other

-- | Local registers print as an underscore followed by their unique.
pprLocalReg :: LocalReg -> SDoc
pprLocalReg (LocalReg uniq _) = char '_' <> ppr uniq
-- -----------------------------------------------------------------------------
-- Foreign Calls

-- | Print a foreign call, assigning the (at most one) result register.
pprCall :: SDoc -> CCallConv -> [Hinted CmmFormal] -> [Hinted CmmActual] -> SDoc
pprCall ppr_fn cconv results args
  | not (is_cishCC cconv)
  = panic $ "pprCall: unknown calling convention"

  | otherwise
  =
    ppr_assign results (ppr_fn <> parens (commafy (map pprArg args))) <> semi
  where
     -- at most one result is supported; zero results is a plain call
     ppr_assign [] rhs = rhs
     ppr_assign [(one,hint)] rhs
         = pprLocalReg one <> text " = "
             <> pprUnHint hint (localRegType one) <> rhs
     ppr_assign _other _rhs = panic "pprCall: multiple results"

     pprArg (expr, AddrHint)
        = cCast (text "void *") expr
        -- see comment by machRepHintCType below
     pprArg (expr, SignedHint)
        = sdocWithDynFlags $ \dflags ->
          cCast (machRep_S_CType $ typeWidth $ cmmExprType dflags expr) expr
     pprArg (expr, _other)
        = pprExpr expr

     -- cast the call result back from the hinted C type
     pprUnHint AddrHint   rep = parens (machRepCType rep)
     pprUnHint SignedHint rep = parens (machRepCType rep)
     pprUnHint _ _ = empty
-- Currently we only have these two calling conventions, but this might
-- change in the future...
is_cishCC :: CCallConv -> Bool
is_cishCC conv = case conv of
  CCallConv          -> True
  CApiConv           -> True
  StdCallConv        -> True
  PrimCallConv       -> False
  JavaScriptCallConv -> False
-- ---------------------------------------------------------------------
-- Find and print local and external declarations for a list of
-- Cmm statements.
--

-- | Declarations for all temporaries and extern labels used by a
-- list of blocks; collected in one pass with the TE monad.
pprTempAndExternDecls :: [CmmBlock] -> (SDoc{-temps-}, SDoc{-externs-})
pprTempAndExternDecls stmts
  = (pprUFM (getUniqSet temps) (vcat . map pprTempDecl),
     vcat (map pprExternDecl (Map.keys lbls)))
  where (temps, lbls) = runTE (mapM_ te_BB stmts)

-- | Extern declarations for labels referenced by static data.
pprDataExterns :: [CmmStatic] -> SDoc
pprDataExterns statics
  = vcat (map pprExternDecl (Map.keys lbls))
  where (_, lbls) = runTE (mapM_ te_Static statics)

-- | Declaration for one local temporary.
pprTempDecl :: LocalReg -> SDoc
pprTempDecl l@(LocalReg _ rep)
  = hcat [ machRepCType rep, space, pprLocalReg l, semi ]
-- | Print an extern declaration for a label, unless it is known not
-- to need one.
pprExternDecl :: CLabel -> SDoc
pprExternDecl lbl
  -- do not print anything for "known external" things
  | not (needsCDecl lbl) = empty
  | Just sz <- foreignLabelStdcallInfo lbl = stdcall_decl sz
  | otherwise =
        hcat [ visibility, label_type lbl , lparen, ppr lbl, text ");"
             -- occasionally useful to see label type
             -- , text "/* ", pprDebugCLabel lbl, text " */"
             ]
  where
    label_type lbl | isBytesLabel lbl = text "B_"
                   | isForeignLabel lbl && isCFunctionLabel lbl
                                          = text "FF_"
                   | isCFunctionLabel lbl = text "F_"
                   | isStaticClosureLabel lbl = text "C_"
                   -- generic .rodata labels
                   | isSomeRODataLabel lbl = text "RO_"
                   -- generic .data labels (common case)
                   | otherwise = text "RW_"

    visibility
        | externallyVisibleCLabel lbl = char 'E'
        | otherwise                   = char 'I'

    -- If the label we want to refer to is a stdcall function (on Windows) then
    -- we must generate an appropriate prototype for it, so that the C compiler will
    -- add the @n suffix to the label (#2276)
    stdcall_decl sz = sdocWithDynFlags $ \dflags ->
        text "extern __attribute__((stdcall)) void " <> ppr lbl
        <> parens (commafy (replicate (sz `quot` wORD_SIZE dflags) (machRep_U_CType (wordWidth dflags))))
        <> semi
-- State collected while scanning: the local temporaries seen, and the
-- labels needing extern declarations (the Map is used as a set).
type TEState = (UniqSet LocalReg, Map CLabel ())

-- | A minimal hand-rolled state monad over 'TEState'.
newtype TE a = TE { unTE :: TEState -> (a, TEState) } deriving (Functor)

instance Applicative TE where
      pure a = TE $ \s -> (a, s)
      (<*>) = ap

instance Monad TE where
   TE m >>= k = TE $ \s -> case m s of (a, s') -> unTE (k a) s'

-- | Record a label that needs an extern declaration.
te_lbl :: CLabel -> TE ()
te_lbl lbl = TE $ \(temps,lbls) -> ((), (temps, Map.insert lbl () lbls))

-- | Record a local register (temporary) that needs a declaration.
te_temp :: LocalReg -> TE ()
te_temp r = TE $ \(temps,lbls) -> ((), (addOneToUniqSet temps r, lbls))

-- | Run a scan from the empty state, returning what was collected.
runTE :: TE () -> TEState
runTE (TE m) = snd (m (emptyUniqSet, Map.empty))
-- Traversals that feed temps/labels into the TE state.

te_Static :: CmmStatic -> TE ()
te_Static (CmmStaticLit lit) = te_Lit lit
te_Static _ = return ()

-- | Scan one basic block: all middle nodes, then the terminator.
te_BB :: CmmBlock -> TE ()
te_BB block = mapM_ te_Stmt (blockToList mid) >> te_Stmt last
  where (_, mid, last) = blockSplit block

te_Lit :: CmmLit -> TE ()
te_Lit (CmmLabel l) = te_lbl l
te_Lit (CmmLabelOff l _) = te_lbl l
te_Lit (CmmLabelDiffOff l1 _ _ _) = te_lbl l1
te_Lit _ = return ()

te_Stmt :: CmmNode e x -> TE ()
te_Stmt (CmmAssign r e) = te_Reg r >> te_Expr e
te_Stmt (CmmStore l r) = te_Expr l >> te_Expr r
te_Stmt (CmmUnsafeForeignCall target rs es)
  = do te_Target target
       mapM_ te_temp rs
       mapM_ te_Expr es
te_Stmt (CmmCondBranch e _ _ _) = te_Expr e
te_Stmt (CmmSwitch e _) = te_Expr e
te_Stmt (CmmCall { cml_target = e }) = te_Expr e
te_Stmt _ = return ()

te_Target :: ForeignTarget -> TE ()
te_Target (ForeignTarget e _) = te_Expr e
te_Target (PrimTarget{}) = return ()

te_Expr :: CmmExpr -> TE ()
te_Expr (CmmLit lit) = te_Lit lit
te_Expr (CmmLoad e _) = te_Expr e
te_Expr (CmmReg r) = te_Reg r
te_Expr (CmmMachOp _ es) = mapM_ te_Expr es
te_Expr (CmmRegOff r _) = te_Reg r
te_Expr (CmmStackSlot _ _) = panic "te_Expr: CmmStackSlot not supported!"

-- only local registers need declarations
te_Reg :: CmmReg -> TE ()
te_Reg (CmmLocal l) = te_temp l
te_Reg _ = return ()
-- ---------------------------------------------------------------------
-- C types for MachReps

-- | @(ty) expr@ -- a plain C cast.
cCast :: SDoc -> CmmExpr -> SDoc
cCast ty expr = parens ty <> pprExpr1 expr

-- | Generate a load of the given type from an address expression.  On
-- architectures where unaligned loads fault, go through a packed
-- one-field struct so the C compiler emits byte-safe access code.
cLoad :: CmmExpr -> CmmType -> SDoc
cLoad expr rep
 = sdocWithPlatform $ \platform ->
   if bewareLoadStoreAlignment (platformArch platform)
   then let decl = machRepCType rep <+> text "x" <> semi
            struct = text "struct" <+> braces (decl)
            packed_attr = text "__attribute__((packed))"
            cast = parens (struct <+> packed_attr <> char '*')
        in parens (cast <+> pprExpr1 expr) <> text "->x"
   else char '*' <> parens (cCast (machRepPtrCType rep) expr)
 where -- On these platforms, unaligned loads are known to cause problems
       bewareLoadStoreAlignment ArchAlpha    = True
       bewareLoadStoreAlignment ArchMipseb   = True
       bewareLoadStoreAlignment ArchMipsel   = True
       bewareLoadStoreAlignment (ArchARM {}) = True
       bewareLoadStoreAlignment ArchARM64    = True
       bewareLoadStoreAlignment ArchSPARC    = True
       bewareLoadStoreAlignment ArchSPARC64  = True
       -- Pessimistically assume that they will also cause problems
       -- on unknown arches
       bewareLoadStoreAlignment ArchUnknown  = True
       bewareLoadStoreAlignment _            = False
isCmmWordType :: DynFlags -> CmmType -> Bool
-- True of GcPtrReg/NonGcReg of native word size
isCmmWordType dflags ty
  | isFloatType ty = False
  | otherwise      = typeWidth ty == wordWidth dflags
-- This is for finding the types of foreign call arguments. For a pointer
-- argument, we always cast the argument to (void *), to avoid warnings from
-- the C compiler.
machRepHintCType :: CmmType -> ForeignHint -> SDoc
machRepHintCType _ AddrHint = text "void *"
machRepHintCType rep SignedHint = machRep_S_CType (typeWidth rep)
machRepHintCType rep _other = machRepCType rep

-- | C type of a pointer to the given Cmm type.
machRepPtrCType :: CmmType -> SDoc
machRepPtrCType r
 = sdocWithDynFlags $ \dflags ->
   if isCmmWordType dflags r then text "P_"
                             else machRepCType r <> char '*'

-- | C type corresponding to a Cmm type.
machRepCType :: CmmType -> SDoc
machRepCType ty | isFloatType ty = machRep_F_CType w
                | otherwise      = machRep_U_CType w
  where
    w = typeWidth ty

-- | Floating-point C types.
machRep_F_CType :: Width -> SDoc
machRep_F_CType W32 = text "StgFloat" -- ToDo: correct?
machRep_F_CType W64 = text "StgDouble"
machRep_F_CType _ = panic "machRep_F_CType"

-- | Unsigned integer C type for a width; the native word maps to W_.
machRep_U_CType :: Width -> SDoc
machRep_U_CType w
 = sdocWithDynFlags $ \dflags ->
   case w of
     _ | w == wordWidth dflags -> text "W_"
     W8 -> text "StgWord8"
     W16 -> text "StgWord16"
     W32 -> text "StgWord32"
     W64 -> text "StgWord64"
     _ -> panic "machRep_U_CType"

-- | Signed integer C type for a width; the native word maps to I_.
machRep_S_CType :: Width -> SDoc
machRep_S_CType w
 = sdocWithDynFlags $ \dflags ->
   case w of
     _ | w == wordWidth dflags -> text "I_"
     W8 -> text "StgInt8"
     W16 -> text "StgInt16"
     W32 -> text "StgInt32"
     W64 -> text "StgInt64"
     _ -> panic "machRep_S_CType"
-- ---------------------------------------------------------------------
-- print strings as valid C strings

-- | Render a ByteString as a double-quoted C string literal, escaping
-- each byte via charToC.
pprStringInCStyle :: ByteString -> SDoc
pprStringInCStyle s = doubleQuotes (text (concatMap charToC (BS.unpack s)))
-- ---------------------------------------------------------------------------
-- Initialising static objects with floating-point numbers. We can't
-- just emit the floating point number, because C will cast it to an int
-- by rounding it. We want the actual bit-representation of the float.
--
-- Consider a concrete C example:
-- double d = 2.5e-10;
-- float f = 2.5e-10f;
--
-- int * i2 = &d; printf ("i2: %08X %08X\n", i2[0], i2[1]);
-- long long * l = &d; printf (" l: %016llX\n", l[0]);
-- int * i = &f; printf (" i: %08X\n", i[0]);
-- Result on 64-bit LE (x86_64):
-- i2: E826D695 3DF12E0B
-- l: 3DF12E0BE826D695
-- i: 2F89705F
-- Result on 32-bit BE (m68k):
-- i2: 3DF12E0B E826D695
-- l: 3DF12E0BE826D695
-- i: 2F89705F
--
-- The trick here is to notice that binary representation does not
-- change much: only Word32 values get swapped on LE hosts / targets.
-- This is a hack to turn the floating point numbers into ints that we
-- can safely initialise to static locations.
-- | Reinterpret a float array as its raw Word32 bits (no numeric
-- conversion -- this is a bit-level cast).
castFloatToWord32Array :: STUArray s Int Float -> ST s (STUArray s Int Word32)
castFloatToWord32Array = U.castSTUArray

-- | Reinterpret a double array as its raw Word64 bits.
castDoubleToWord64Array :: STUArray s Int Double -> ST s (STUArray s Int Word64)
castDoubleToWord64Array = U.castSTUArray
-- | Bit-cast a float literal to a word-sized integer literal.  On
-- 64-bit big-endian targets the 32 float bits are shifted into the
-- high half of the word.
floatToWord :: DynFlags -> Rational -> CmmLit
floatToWord dflags r
  = runST (do
        arr <- newArray_ ((0::Int),0)
        writeArray arr 0 (fromRational r)
        arr' <- castFloatToWord32Array arr
        w32 <- readArray arr' 0
        return (CmmInt (toInteger w32 `shiftL` wo) (wordWidth dflags))
    )
    where wo | wordWidth dflags == W64
             , wORDS_BIGENDIAN dflags = 32
             | otherwise = 0
-- | Pack the bit patterns of two float literals into one 64-bit word,
-- ordering the halves according to target endianness.
floatPairToWord :: DynFlags -> Rational -> Rational -> CmmLit
floatPairToWord dflags r1 r2
  = runST (do
        arr <- newArray_ ((0::Int),1)
        writeArray arr 0 (fromRational r1)
        writeArray arr 1 (fromRational r2)
        arr' <- castFloatToWord32Array arr
        w32_1 <- readArray arr' 0
        w32_2 <- readArray arr' 1
        return (pprWord32Pair w32_1 w32_2)
    )
    where pprWord32Pair w32_1 w32_2
              | wORDS_BIGENDIAN dflags =
                  CmmInt ((shiftL i1 32) .|. i2) W64
              | otherwise =
                  CmmInt ((shiftL i2 32) .|. i1) W64
              where i1 = toInteger w32_1
                    i2 = toInteger w32_2
-- | Bit-cast a double literal to one 64-bit word, or to two 32-bit
-- words on 32-bit targets (high word first when big-endian).
doubleToWords :: DynFlags -> Rational -> [CmmLit]
doubleToWords dflags r
  = runST (do
        arr <- newArray_ ((0::Int),1)
        writeArray arr 0 (fromRational r)
        arr' <- castDoubleToWord64Array arr
        w64 <- readArray arr' 0
        return (pprWord64 w64)
    )
    where targetWidth = wordWidth dflags
          targetBE = wORDS_BIGENDIAN dflags
          pprWord64 w64
              | targetWidth == W64 =
                  [ CmmInt (toInteger w64) targetWidth ]
              | targetWidth == W32 =
                  [ CmmInt (toInteger targetW1) targetWidth
                  , CmmInt (toInteger targetW2) targetWidth
                  ]
              | otherwise = panic "doubleToWords.pprWord64"
              where (targetW1, targetW2)
                        | targetBE = (wHi, wLo)
                        | otherwise = (wLo, wHi)
                    wHi = w64 `shiftR` 32
                    wLo = w64 .&. 0xFFFFffff
-- ---------------------------------------------------------------------------
-- Utils

-- | log2 of the word size: the shift that converts a byte offset to a
-- word offset.
wordShift :: DynFlags -> Int
wordShift = widthInLog . wordWidth

-- | Separate documents with commas and spaces.
commafy :: [SDoc] -> SDoc
commafy = hsep . punctuate comma
-- Print in C hex format: 0x13fa

-- | Print an integer literal in C hex notation, truncated to the
-- given width and with the appropriate unsigned-literal suffix.
pprHexVal :: Integer -> Width -> SDoc
pprHexVal w rep
  | w < 0     = parens (char '-' <>
                    text "0x" <> intToDoc (-w) <> repsuffix rep)
  | otherwise = text "0x" <> intToDoc w <> repsuffix rep
  where
        -- type suffix for literals:
        -- Integer literals are unsigned in Cmm/C. We explicitly cast to
        -- signed values for doing signed operations, but at all other
        -- times values are unsigned. This also helps eliminate occasional
        -- warnings about integer overflow from gcc.
        repsuffix W64 = sdocWithDynFlags $ \dflags ->
               if cINT_SIZE dflags == 8 then char 'U'
          else if cLONG_SIZE dflags == 8 then text "UL"
          else if cLONG_LONG_SIZE dflags == 8 then text "ULL"
          else panic "pprHexVal: Can't find a 64-bit type"
        repsuffix _ = char 'U'

        intToDoc :: Integer -> SDoc
        intToDoc i = case truncInt i of
                         0 -> char '0'
                         v -> go v

        -- We need to truncate value as Cmm backend does not drop
        -- redundant bits to ease handling of negative values.
        -- Thus the following Cmm code on 64-bit arch, like amd64:
        --     CInt v;
        --     v = {something};
        --     if (v == %lobits32(-1)) { ...
        -- leads to the following C code:
        --     StgWord64 v = (StgWord32)({something});
        --     if (v == 0xFFFFffffFFFFffffU) { ...
        -- Such code is incorrect as it promotes both operands to StgWord64
        -- and the whole condition is always false.
        truncInt :: Integer -> Integer
        truncInt i =
            case rep of
                W8 -> i `rem` (2^(8 :: Int))
                W16 -> i `rem` (2^(16 :: Int))
                W32 -> i `rem` (2^(32 :: Int))
                W64 -> i `rem` (2^(64 :: Int))
                _ -> panic ("pprHexVal/truncInt: C backend can't encode "
                            ++ show rep ++ " literals")

        -- emit hex digits most-significant first via recursion
        go 0 = empty
        go w' = go q <> dig
             where
               (q,r) = w' `quotRem` 16
               dig | r < 10 = char (chr (fromInteger r + ord '0'))
                   | otherwise = char (chr (fromInteger r - 10 + ord 'a'))
| sdiehl/ghc | compiler/GHC/CmmToC.hs | bsd-3-clause | 52,471 | 0 | 20 | 16,099 | 12,823 | 6,359 | 6,464 | 895 | 68 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FunctionalDependencies #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
module Unification where
import Control.Applicative
import Control.Monad
import Control.Monad.ST
import Control.Monad.Except
import Data.Monoid
import Data.STRef
import qualified Data.Foldable as F
-- | Terms over a signature functor @f@, with mutable unification
-- variables living in state thread @a@.
data Exp f a
  = MutVar (Var f a)  -- ^ mutable (solvable) unification variable
  | GenVar Int        -- ^ rigid variable, identified by index
  | Op (f (Exp f a))  -- ^ a signature operator applied to subterms

-- Unify {{{

-- | The unification monad: ST plus String-typed failure.
newtype Unify s a = Unify
  { unUnify :: ExceptT String (ST s) a
  } deriving
  ( Functor , Applicative , Monad
  , Alternative , MonadPlus
  )

-- | Lift a raw ST action into 'Unify'.
liftST :: ST s a -> Unify s a
liftST = Unify . lift

-- Delegates to the underlying (ST s) instance of MonadRef.
instance MonadRef (STRef s) (Unify s) where
  newRef = liftST . newRef
  readRef = liftST . readRef
  writeRef r = liftST . writeRef r
-- }}}

-- Unification {{{

-- | A unification variable: a mutable cell that holds 'Nothing' while
-- unbound and 'Just' its binding once solved.
type Var f a = STRef a (Maybe (Exp f a))
-- | Unify two terms, solving mutable variables by side effect.
--
-- Both sides are pruned (path-compressed) first.  Binding a variable
-- to a term is guarded by the occurs check to keep solutions acyclic.
unify :: Unifiable f => Exp f a -> Exp f a -> Unify a ()
unify r s = (,) <$> prune r <*> prune s >>= \case
  (MutVar x,u@(MutVar y)) -> unless (x == y) $ x ?~ u
  (MutVar x,u ) -> occurs x u >>= throw "occurs check failed." ? (x ?~ u)
  (t ,MutVar y) -> occurs y t >>= throw "occurs check failed." ? (y ?~ t)
  -- BUG FIX: two rigid variables unify iff they are the *same* variable.
  -- The original condition was inverted ('when (x == y)'), throwing the
  -- "different GenVars" error precisely when the variables were equal.
  (GenVar x,GenVar y) -> when (x /= y) $ throw "different GenVars"
  (Op t,Op u) -> unifyWith unify t u >>= \case
    Just v -> return $ F.fold v
    _ -> throw "subunification error"
  _ -> throw "different types"
-- | Follow a chain of bound variables to its representative term,
-- compressing the path: every visited variable is re-pointed at the
-- final result.
prune :: (MonadRef (STRef a) m, Functor f) => Exp f a -> m (Exp f a)
prune e = case e of
  MutVar r -> readRef r >>= \case
    Just t -> do
      u <- prune t
      r ?~ u  -- path compression
      return u
    _ -> return e
  _ -> return e
-- | Does variable @r@ occur in the (pruned) term?  Used to refuse
-- cyclic bindings during unification.
occurs :: (MonadRef (STRef a) m, Functor f, Foldable f) => Var f a -> Exp f a -> m Bool
occurs r = prune >=> \case
  MutVar s -> return $ r == s
  GenVar _ -> return False
  Op t -> getAny <$> foldMapM (fmap Any . occurs r) t
-- | Replace each @GenVar n@ by the n-th term of the substitution list.
-- NOTE(review): uses the partial '(!!)' -- an out-of-range GenVar
-- index crashes, so callers must supply enough terms.
instantiate :: Functor f => [Exp f a] -> Exp f a -> Exp f a
instantiate ts x = case x of
  MutVar _ -> x
  GenVar n -> ts !! n
  Op t -> Op $ instantiate ts <$> t
-- | Trivial success.
okay :: Unify s ()
okay = pure ()

-- | Fail unification with a message.
throw :: String -> Unify s ()
throw = Unify . throwError
-- }}}
-- MonadRef {{{

-- | Monads supporting mutable references of type @r@; the functional
-- dependency says the monad determines its reference type.
class Monad m => MonadRef r m | m -> r where
  newRef :: a -> m (r a)
  readRef :: r a -> m a
  writeRef :: r a -> a -> m ()
  modifyRef :: r a -> (a -> a) -> m ()
  -- default: read-modify-write (not atomic)
  modifyRef r f = do
    a <- readRef r
    writeRef r $ f a

instance MonadRef (STRef s) (ST s) where
  newRef = newSTRef
  readRef = readSTRef
  writeRef = writeSTRef
-- | Write @Just a@ into a reference holding a @Maybe@.
(?~) :: MonadRef r m => r (Maybe a) -> a -> m ()
(?~) x = writeRef x . Just
infixr 5 ?~

-- | Operator alias for 'writeRef'.
(.~) :: MonadRef r m => r a -> a -> m ()
(.~) = writeRef
infixr 5 .~

-- | Alias for 'readRef'.
ref :: MonadRef r m => r a -> m a
ref = readRef
-- }}}
-- Unifiable {{{

-- | Structures whose spines can be matched, combining corresponding
-- children with an effectful function.  The result is 'Nothing' when
-- the two spines disagree.
class Traversable t => Unifiable t where
  unifyWith :: Applicative f => (a -> b -> f c) -> t a -> t b -> f (Maybe (t c))

-- lists match element-wise and must have equal length
instance Unifiable [] where
  unifyWith f = \case
    a:as -> \case
      b:bs -> fmap . (:) <$> f a b <*> unifyWith f as bs
      _ -> bad
    [] -> \case
      [] -> good []
      _ -> bad

instance Unifiable Maybe where
  unifyWith f = \case
    Just a -> \case
      Just b -> Just . Just <$> f a b
      _ -> bad
    Nothing -> \case
      Nothing -> good Nothing
      _ -> bad

-- Lefts must be equal; Rights are matched with f
instance Eq e => Unifiable (Either e) where
  unifyWith f = \case
    Left x -> \case
      Left y | x == y -> good $ Left x
      _ -> bad
    Right a -> \case
      Right b -> Just . Right <$> f a b
      _ -> bad

-- first components must be equal; second components are matched with f
instance Eq e => Unifiable ((,) e) where
  unifyWith f (x,a) (y,b) = if x == y
    then Just . (,) x <$> f a b
    else bad
-- | Successful structural match: the value wrapped in Just, lifted.
good :: Applicative f => a -> f (Maybe a)
good a = pure (Just a)

-- | Structural mismatch: a lifted Nothing.
bad :: Applicative f => f (Maybe a)
bad = pure Nothing
-- }}}
-- Util {{{

-- | Branch on the Bool supplied last: @t ? f $ b@ is @t@ when @b@
-- holds and @f@ otherwise.
(?) :: a -> a -> Bool -> a
(?) onTrue onFalse b
  | b         = onTrue
  | otherwise = onFalse
infixl 3 ?
-- | Monadic 'foldMap': map each element to a monoid value inside the
-- monad and combine the results right-to-left.
foldMapM :: (Monad f, Foldable t, Monoid m) => (a -> f m) -> t a -> f m
foldMapM f = F.foldrM step mempty
  where step a acc = (`mappend` acc) <$> f a
-- }}}
| kylcarte/cardelli | src/Unification.hs | bsd-3-clause | 4,077 | 0 | 15 | 1,253 | 1,909 | 959 | 950 | -1 | -1 |
module LevelBuilder where
import Control.Category
import Control.Lens
import Data.Default
import Data.Map.Strict as Map
import Prelude hiding (Either(..), id, (.))
import Coord
import Entity
import GameState
-- | Local alias for 'mappend' (pre-Semigroup style).
(<>)
  :: Monoid m
  => m -> m -> m
(<>) = mappend

-- | The kinds of tile a level cell can hold.
data Tile
  = Floor
  | Wall
  | LitColumn
  deriving (Show,Eq)

-- Floor is the identity; combining two non-Floor tiles keeps the left
-- one (left-biased).
instance Monoid Tile where
  mempty = Floor
  mappend x Floor = x
  mappend Floor x = x
  mappend x y = x  -- y deliberately discarded: left wins
-- | A partial level: a map from coordinates to tiles.
newtype LevelBuilder = LevelBuilder
  { asMap :: CoordMap Tile
  }

-- Union of the tile maps; overlapping coordinates are merged with the
-- Tile monoid (left-biased except that Floor is the identity).
instance Monoid LevelBuilder where
  mempty = LevelBuilder Map.empty
  mappend x y = LevelBuilder $ Map.unionWith (<>) (asMap x) (asMap y)
  mconcat list = LevelBuilder $ Map.unionsWith (<>) (fmap asMap list)
-- | Insert (or overwrite) a single tile at a coordinate.
setTile :: (Coord, Tile) -> LevelBuilder -> LevelBuilder
setTile (pos, tile) (LevelBuilder tiles) =
  LevelBuilder (Map.insert pos tile tiles)

-- | A level fragment consisting of the given tile at every listed
-- coordinate.
mkTiles :: Tile -> [Coord] -> LevelBuilder
mkTiles tile coords = Prelude.foldr place mempty coords
  where
    place pos builder = setTile (pos, tile) builder
-- | Fill the whole bounded area with floor tiles.
mkFloors :: Bounds -> LevelBuilder
mkFloors bounds = mkTiles Floor $ coordsWithin bounds

-- | Put walls on the border of the bounded area.
mkBounds :: Bounds -> LevelBuilder
mkBounds bounds = mkTiles Wall $ borderCoords bounds

-- | A walled room: floor everywhere, walls on the border.  Wall wins
-- over Floor on the overlapping border cells because Floor is the
-- identity of the Tile monoid.
mkRoom :: Bounds -> LevelBuilder
mkRoom bounds = (mkFloors bounds) <> (mkBounds bounds)
-- | Do two level fragments claim any coordinate in common?
conflict :: LevelBuilder -> LevelBuilder -> Bool
conflict (LevelBuilder builder1) (LevelBuilder builder2) =
  -- Map.null on the intersection states the intent directly instead of
  -- the roundabout "size /= 0" comparison.
  not (Map.null (Map.intersection builder1 builder2))
-- | Realise a builder as entities and add them to the current level
-- of the game state.
mergeLevelBuilder :: LevelBuilder -> GameState -> GameState
mergeLevelBuilder builder state = addEntitiesToCurrLevel entities state
  where
    entities = mkEntitiesFrom builder

-- | One entity per placed tile.
mkEntitiesFrom :: LevelBuilder -> [Entity]
mkEntitiesFrom (LevelBuilder builder) = fmap mkEntityFrom (toList builder)

-- | Translate a placed tile into the matching entity constructor.
mkEntityFrom :: (Coord, Tile) -> Entity
mkEntityFrom (coord,Wall) = mkWall coord
mkEntityFrom (coord,Floor) = mkFloor coord
mkEntityFrom (coord,LitColumn) = mkLitColumn coord

-- | The demo level: a 30x20 walled room plus two lit columns.
-- NOTE(review): Coord 15 10 passed to mkGameState is presumably the
-- starting position -- confirm against GameState.
mkLevel :: GameState
mkLevel = mergeLevelBuilder level $ mkGameState (Coord 15 10)
  where
    level = mkRoom (Bounds (Coord 0 0) (Coord 30 20)) <> mkTiles LitColumn [Coord 5 5, Coord 11 9]
| fros1y/umbral | src/LevelBuilder.hs | bsd-3-clause | 2,169 | 0 | 12 | 404 | 730 | 389 | 341 | 57 | 1 |
module Test.Ninja(main) where
import Development.Shake
import qualified Development.Shake.Config as Config
import System.Directory(copyFile, removeFile)
import Control.Monad
import General.GetOpt
import General.Extra
import Test.Type
import qualified Data.HashMap.Strict as Map
import Data.List.Extra
import System.IO.Extra
import qualified Run
import System.Environment
-- Command-line flag "--arg=..." whose values are forwarded to the
-- embedded Ninja/Shake run.
opts = Option "" ["arg"] (ReqArg Right "") ""

-- | Set to True to test with real Ninja
-- On Windows doesn't work because echo foo > 1 isn't supported
real_ninja = False
-- Entry point: run the suite, dispatching each build either to the
-- real ninja executable or to Shake's built-in Ninja emulation.
main = testBuildArgs test [opts] $ \opts -> do
  -- NOTE(review): this inner 'opts' shadows the module-level option
  -- descriptor above
  let real = "real" `elem` opts
  action $ if real || real_ninja
    then cmd "ninja" opts
    else liftIO $ withArgs ("--lint":"--report=report.html":opts) Run.main
-- | The test battery: drives Shake's Ninja emulation (or real ninja)
-- over the fixture .ninja files under src/Test/Ninja and checks the
-- produced outputs and diagnostics.  Runs in a scratch directory.
test build = do
    -- helpers: run a ninja file with extra shake flags, run it plainly,
    -- or assert that a run fails with a given message fragment
    let runEx ninja shake = build $ "--exception" : "--no-report" : map ("--arg=" ++) (words ninja) ++ words shake
    let run ninja = runEx ninja []
    let runFail ninja bad = assertException [bad] $ runEx ninja "--quiet"
    build ["clean"]

    run "-f../../src/Test/Ninja/test1.ninja"
    assertExists "out1.txt"

    run "-f../../src/Test/Ninja/test2.ninja"
    assertExists "out2.2"
    assertMissing "out2.1"
    build ["clean"]
    run "-f../../src/Test/Ninja/test2.ninja out2.1"
    assertExists "out2.1"
    assertMissing "out2.2"

    -- variable scoping across include/subninja
    copyFile "../../src/Test/Ninja/test3-sub.ninja" "test3-sub.ninja"
    copyFile "../../src/Test/Ninja/test3-inc.ninja" "test3-inc.ninja"
    createDirectoryRecursive "subdir"
    copyFile "../../src/Test/Ninja/subdir/1.ninja" "subdir/1.ninja"
    copyFile "../../src/Test/Ninja/subdir/2.ninja" "subdir/2.ninja"
    run "-f../../src/Test/Ninja/test3.ninja"
    assertContentsWords "out3.1" "g4+b1+++i1"
    assertContentsWords "out3.2" "g4++++i1"
    assertContentsWords "out3.3" "g4++++i1"
    assertContentsWords "out3.4" "g4+++s1+s2"

    run "-f../../src/Test/Ninja/test4.ninja out"
    assertExists "out.txt"
    assertExists "out2.txt"

    run "-f../../src/Test/Ninja/test5.ninja"
    assertExists "output file"

    -- #565, check multi-file rules that don't create their contents
    run "-f../../src/Test/Ninja/test7.ninja"

    writeFile "nocreate.log" ""
    writeFile "nocreate.in" ""
    run "-f../../src/Test/Ninja/nocreate.ninja"
    assertContentsWords "nocreate.log" "x"
    run "-f../../src/Test/Ninja/nocreate.ninja"
    run "-f../../src/Test/Ninja/nocreate.ninja"
    assertContentsWords "nocreate.log" "x x x"

    writeFile "input" ""
    runFail "-f../../src/Test/Ninja/lint.ninja bad --lint" "'needed' file required rebuilding"
    run "-f../../src/Test/Ninja/lint.ninja good --lint"
    runFail "-f../../src/Test/Ninja/lint.ninja bad --lint" "not a pre-dependency"

    -- compdb tool output: compared modulo a '*' wildcard per line
    res <- fmap (drop1 . lines . fst) $ captureOutput $ runEx "-f../../src/Test/Ninja/compdb.ninja -t compdb cxx" "--quiet"
    want <- lines <$> readFile "../../src/Test/Ninja/compdb.output"
    let eq a b | (a1,'*':a2) <- break (== '*') a = unless (a1 `isPrefixOf` b && a2 `isSuffixOf` b) $ a === b
               | otherwise = a === b
    length want === length res
    zipWithM_ eq want res

    -- Test initial variable bindings and variables in include/subninja statements
    let test6 = "test6"
    copyFile "../../src/Test/Ninja/test6-sub.ninja" $ test6 ++ "-sub.ninja"
    copyFile "../../src/Test/Ninja/test6-inc.ninja" $ test6 ++ "-inc.ninja"
    copyFile "../../src/Test/Ninja/test6.ninja" $ test6 ++ ".ninja"
    config <- Config.readConfigFileWithEnv [("v1", test6)] $ test6 ++ ".ninja"
    -- The file included by subninja should have a separate variable scope
    Map.lookup "v2" config === Just "g2"

    run "-f../../src/Test/Ninja/phonyorder.ninja bar.txt"

    -- tests from ninjasmith: https://github.com/ndmitchell/ninjasmith/
    run "-f../../src/Test/Ninja/redefine.ninja"
    assertContentsWords "redefine.txt" "version3 version2"

    run "-f../../src/Test/Ninja/buildseparate.ninja"
    assertContentsWords "buildseparate.txt" "XX"

    run "-f../../src/Test/Ninja/lexical.ninja"
    assertContentsWords "lexical.txt" "XFoo_BarXXFooX.bar"

    run "-f../../src/Test/Ninja/continuations.ninja"
    assertExists "continuations.txt"

    copyFile "../../src/Test/Ninja/restart.ninja" "restart.ninja"
    runEx "-frestart.ninja" "--sleep"
    assertExists "restart.txt"

    createDirectoryRecursive "directory1"
    createDirectoryRecursive "directory2"
    run "-f../../src/Test/Ninja/allow_directory.ninja"

    when False $ do
        -- currently fails because Shake doesn't match Ninja here
        run "-f../../src/Test/Ninja/outputtouch.ninja"
        assertContentsWords "outputtouch.txt" "hello"
        writeFile "outputtouch.txt" "goodbye"
        run "-f../../src/Test/Ninja/outputtouch.ninja"
        assertContentsWords "outputtouch.txt" "goodbye"
        removeFile "outputtouch.txt"
        run "-f../../src/Test/Ninja/outputtouch.ninja"
        assertContentsWords "outputtouch.txt" "hello"
| ndmitchell/shake | src/Test/Ninja.hs | bsd-3-clause | 4,964 | 0 | 17 | 877 | 990 | 443 | 547 | 97 | 2 |
#!/usr/bin/env runghc
-- NB: This code deliberately avoids relying on non-standard packages
import Control.Monad
import Data.List
import System.Environment
import System.Exit
import System.IO
import Distribution.PackageDescription.Parse (readPackageDescription)
import Distribution.PackageDescription (packageDescription, testedWith)
import Distribution.Compiler (CompilerFlavor(..))
import Distribution.Version
import Distribution.Text
-- | Print an error to stderr and abort with a non-zero exit code.
putStrLnErr :: String -> IO ()
putStrLnErr m = do
  hPutStrLn stderr ("*ERROR* " ++ m)
  exitFailure

-- | Print a warning to stderr (does not abort).
putStrLnWarn :: String -> IO ()
putStrLnWarn = hPutStrLn stderr . ("*WARNING* " ++)

-- | Print an informational message to stderr.
putStrLnInfo :: String -> IO ()
putStrLnInfo = hPutStrLn stderr . ("*INFO* " ++)
main :: IO ()
main = do
  args <- getArgs
  case args of
    -- first argument is the .cabal file; the rest are extra apt packages
    (cabfn:xpkgs) -> do genTravisFromCabalFile cabfn xpkgs
    _ -> putStrLnErr (unlines $ [ "expected .cabal file as command-line argument"
                                , "Usage: make_travis_yml.hs <cabal-file> <extra-apt-packages...>"
                                , ""
                                , "Example: make_travis_yml.hs someProject.cabal alex-3.1.4 liblzma-dev > .travis.yml"
                                ])
-- | Read the .cabal file, derive the set of GHC versions from its
-- tested-with field, and print a .travis.yml to stdout.  Aborts (via
-- putStrLnErr) when the tested-with specification is missing or bad.
genTravisFromCabalFile :: FilePath -> [String] -> IO ()
genTravisFromCabalFile fn xpkgs = do
    gpd <- readPackageDescription maxBound fn

    let compilers = testedWith $ packageDescription $ gpd

    let unknownComps = nub [ c | (c,_) <- compilers, c /= GHC ]
        ghcVerConstrs = [ vc | (GHC,vc) <- compilers ]
        ghcVerConstrs' = simplifyVersionRange $ foldr unionVersionRanges noVersion ghcVerConstrs

    when (null compilers) $ do
        putStrLnErr "empty or missing 'tested-with:' definition in .cabal file"

    unless (null unknownComps) $ do
        putStrLnWarn $ "ignoring unsupported compilers mentioned in tested-with: " ++ show unknownComps

    when (null ghcVerConstrs) $ do
        putStrLnErr "'tested-with:' doesn't mention any 'GHC' version"

    when (isNoVersion ghcVerConstrs') $ do
        putStrLnErr "'tested-with:' describes an empty version range for 'GHC'"

    when (isAnyVersion ghcVerConstrs') $ do
        putStrLnErr "'tested-with:' allows /any/ 'GHC' version"

    let testedGhcVersions = filter (`withinRange` ghcVerConstrs') knownGhcVersions

    when (null testedGhcVersions) $ do
        putStrLnErr "no known GHC version is allowed by the 'tested-with' specification"

    putStrLnInfo $ "Generating Travis-CI config for testing for GHC versions: " ++ (unwords $ map disp' $ testedGhcVersions)

    ----------------------------------------------------------------------------
    -- travis.yml generation starts here

    putStrLn "# This file has been generated -- see https://github.com/hvr/multi-ghc-travis"
    putStrLn "language: c"
    putStrLn "sudo: false"
    putStrLn ""
    putStrLn "cache:"
    putStrLn "  directories:"
    putStrLn "    - $HOME/.cabsnap"
    putStrLn "    - $HOME/.cabal/packages"
    putStrLn ""
    putStrLn "before_cache:"
    putStrLn "  - rm -fv $HOME/.cabal/packages/hackage.haskell.org/build-reports.log"
    putStrLn "  - rm -fv $HOME/.cabal/packages/hackage.haskell.org/00-index.tar"
    putStrLn ""
    putStrLn "matrix:"
    putStrLn "  include:"

    -- one matrix entry per GHC version, with matching cabal-install
    forM_ testedGhcVersions $ \gv -> do
        let cvs = disp' (lookupCabVer gv)
            gvs = disp' gv
            xpkgs' = concatMap (',':) xpkgs
        putStrLn $ concat [ "    - env: CABALVER=", cvs, " GHCVER=", gvs ]
        putStrLn $ concat [ "      compiler: \": #GHC ", gvs, "\"" ]
        putStrLn $ concat [ "      addons: {apt: {packages: [cabal-install-", cvs, ",ghc-", gvs, xpkgs'
                          , "], sources: [hvr-ghc]}}" ]
        return ()

    -- unreleased (HEAD) GHCs are allowed to fail
    let headGhcVers = filter isHead testedGhcVersions

    unless (null headGhcVers) $ do
        putStrLn ""
        putStrLn "  allow_failures:"

    forM_ headGhcVers $ \gv -> do
        let cvs = disp' (lookupCabVer gv)
            gvs = disp' gv
        putStrLn $ concat [ "    - env: CABALVER=", cvs, " GHCVER=", gvs ]

    putStrLn ""
    putStrLn "before_install:"
    putStrLn " - unset CC"
    putStrLn " - export PATH=/opt/ghc/$GHCVER/bin:/opt/cabal/$CABALVER/bin:$PATH"
    putStrLn ""
    putStr $ unlines
        [ "install:"
        , " - cabal --version"
        , " - echo \"$(ghc --version) [$(ghc --print-project-git-commit-id 2> /dev/null || echo '?')]\""
        , " - if [ -f $HOME/.cabal/packages/hackage.haskell.org/00-index.tar.gz ];"
        , "   then"
        , "     zcat $HOME/.cabal/packages/hackage.haskell.org/00-index.tar.gz >"
        , "          $HOME/.cabal/packages/hackage.haskell.org/00-index.tar;"
        , "   fi"
        , " - travis_retry cabal update -v"
        , " - sed -i 's/^jobs:/-- jobs:/' ${HOME}/.cabal/config"
        , " - cabal install --only-dependencies --enable-tests --enable-benchmarks --dry -v > installplan.txt"
        , " - sed -i -e '1,/^Resolving /d' installplan.txt; cat installplan.txt"
        , ""
        , "# check whether current requested install-plan matches cached package-db snapshot"
        , " - if diff -u installplan.txt $HOME/.cabsnap/installplan.txt;"
        , "   then"
        , "     echo \"cabal build-cache HIT\";"
        , "     rm -rfv .ghc;"
        , "     cp -a $HOME/.cabsnap/ghc $HOME/.ghc;"
        , "     cp -a $HOME/.cabsnap/lib $HOME/.cabsnap/share $HOME/.cabsnap/bin $HOME/.cabal/;"
        , "   else"
        , "     echo \"cabal build-cache MISS\";"
        , "     rm -rf $HOME/.cabsnap;"
        , "     mkdir -p $HOME/.ghc $HOME/.cabal/lib $HOME/.cabal/share $HOME/.cabal/bin;"
        , "     cabal install --only-dependencies --enable-tests --enable-benchmarks;"
        , "   fi"
        , ""
        , "# snapshot package-db on cache miss"
        , " - if [ ! -d $HOME/.cabsnap ];"
        , "   then"
        , "     echo \"snapshotting package-db to build-cache\";"
        , "     mkdir $HOME/.cabsnap;"
        , "     cp -a $HOME/.ghc $HOME/.cabsnap/ghc;"
        , "     cp -a $HOME/.cabal/lib $HOME/.cabal/share $HOME/.cabal/bin installplan.txt $HOME/.cabsnap/;"
        , "   fi"
        , ""
        , "# Here starts the actual work to be performed for the package under test;"
        , "# any command which exits with a non-zero exit code causes the build to fail."
        , "script:"
        , " - if [ -f configure.ac ]; then autoreconf -i; fi"
        , " - cabal configure --enable-tests --enable-benchmarks -v2  # -v2 provides useful information for debugging"
        , " - cabal build   # this builds all libraries and executables (including tests/benchmarks)"
        , " - cabal test"
        , " - cabal check"
        , " - cabal sdist   # tests that a source-distribution can be generated"
        , ""
        , "# Check that the resulting source distribution can be built & installed."
        , "# If there are no other `.tar.gz` files in `dist`, this can be even simpler:"
        , "# `cabal install --force-reinstalls dist/*-*.tar.gz`"
        , " - SRC_TGZ=$(cabal info . | awk '{print $2;exit}').tar.gz &&"
        , "   (cd dist && cabal install --force-reinstalls \"$SRC_TGZ\")"
        , ""
        , "# EOF"
        ]

    return ()
  where
    knownGhcVersions :: [Version]
    knownGhcVersions = fmap (`Version` [])
                       [ [7,0,1], [7,0,2], [7,0,3], [7,0,4]
                       , [7,2,1], [7,2,2]
                       , [7,4,1], [7,4,2]
                       , [7,6,1], [7,6,2], [7,6,3]
                       , [7,8,1], [7,8,2], [7,8,3], [7,8,4]
                       , [7,10,1], [7,10,2], [7,10,3]
                       , [7,11] -- HEAD
                       ]

    -- NOTE(review): partial -- a Version with fewer than two components
    -- or an unknown major/minor pair crashes; acceptable for a dev
    -- script but worth knowing.
    lookupCabVer :: Version -> Version
    lookupCabVer (Version (x:y:_) _) = maybe (error "internal error") id $ lookup (x,y) cabalVerMap
      where
        cabalVerMap = fmap (fmap (`Version` []))
                      [ ((7, 0), [1,16])
                      , ((7, 2), [1,16])
                      , ((7, 4), [1,16])
                      , ((7, 6), [1,16])
                      , ((7, 8), [1,18])
                      , ((7,10), [1,22])
                      , ((7,11), [1,23]) -- HEAD
                      ]

    -- odd minor version == unreleased/HEAD GHC (also partial on short
    -- version lists)
    isHead (Version (_:y:_) _) = odd (y :: Int)

    disp' v | isHead v = "head"
            | otherwise = display v
{- DATX02-17-26, automated assessment of imperative programs.
- Copyright, 2017, see AUTHORS.md.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-}
-- | Test for eliminating redundant blocks and statements
module Norm.CompAssignmentTest (
allTests
) where
import Norm.NormTestUtil
import Norm.CompAssignment
-- | The normalizer pipeline under test: only the compound-assignment
-- rewriting pass ('normCompAss') is applied.
normalizers :: NormalizerCU
normalizers = [ normCompAss ]
-- | All golden tests for the compound-assignment normalizer; the test
-- cases are discovered in the "compassignment" directory.
allTests :: TestTree
allTests = normTestsDir "Norm.CompAssignment" "compassignment" [normalizers]
| DATX02-17-26/DATX02-17-26 | Test/Norm/CompAssignmentTest.hs | gpl-2.0 | 1,150 | 0 | 6 | 204 | 56 | 34 | 22 | 8 | 1 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- |
-- Module : Network.AWS.CloudTrail.Waiters
-- Copyright : (c) 2013-2015 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
module Network.AWS.CloudTrail.Waiters where
import Network.AWS.CloudTrail.Types
import Network.AWS.Prelude
import Network.AWS.Waiter
| fmapfmapfmap/amazonka | amazonka-cloudtrail/gen/Network/AWS/CloudTrail/Waiters.hs | mpl-2.0 | 633 | 0 | 4 | 122 | 39 | 31 | 8 | 7 | 0 |
-- Module : Network.AWS.KMS
-- Copyright : (c) 2013-2014 Brendan Hay <brendan.g.hay@gmail.com>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- | Amazon Key Management Service (KMS) is a managed service that makes it easy
-- for you to create and control the encryption keys used to encrypt your data,
-- and uses Hardware Security Modules (HSMs) to protect the security of your
-- keys. Amazon Key Management Service is integrated with other Amazon services
-- including Amazon EBS, Amazon S3, and Amazon Redshift. Amazon Key Management
-- Service is also integrated with Amazon CloudTrail to provide you with logs of
-- all key usage to help meet your regulatory and compliance needs.
module Network.AWS.KMS
( module Network.AWS.KMS.CreateAlias
, module Network.AWS.KMS.CreateGrant
, module Network.AWS.KMS.CreateKey
, module Network.AWS.KMS.Decrypt
, module Network.AWS.KMS.DeleteAlias
, module Network.AWS.KMS.DescribeKey
, module Network.AWS.KMS.DisableKey
, module Network.AWS.KMS.DisableKeyRotation
, module Network.AWS.KMS.EnableKey
, module Network.AWS.KMS.EnableKeyRotation
, module Network.AWS.KMS.Encrypt
, module Network.AWS.KMS.GenerateDataKey
, module Network.AWS.KMS.GenerateDataKeyWithoutPlaintext
, module Network.AWS.KMS.GenerateRandom
, module Network.AWS.KMS.GetKeyPolicy
, module Network.AWS.KMS.GetKeyRotationStatus
, module Network.AWS.KMS.ListAliases
, module Network.AWS.KMS.ListGrants
, module Network.AWS.KMS.ListKeyPolicies
, module Network.AWS.KMS.ListKeys
, module Network.AWS.KMS.PutKeyPolicy
, module Network.AWS.KMS.ReEncrypt
, module Network.AWS.KMS.RetireGrant
, module Network.AWS.KMS.RevokeGrant
, module Network.AWS.KMS.Types
, module Network.AWS.KMS.UpdateKeyDescription
) where
import Network.AWS.KMS.CreateAlias
import Network.AWS.KMS.CreateGrant
import Network.AWS.KMS.CreateKey
import Network.AWS.KMS.Decrypt
import Network.AWS.KMS.DeleteAlias
import Network.AWS.KMS.DescribeKey
import Network.AWS.KMS.DisableKey
import Network.AWS.KMS.DisableKeyRotation
import Network.AWS.KMS.EnableKey
import Network.AWS.KMS.EnableKeyRotation
import Network.AWS.KMS.Encrypt
import Network.AWS.KMS.GenerateDataKey
import Network.AWS.KMS.GenerateDataKeyWithoutPlaintext
import Network.AWS.KMS.GenerateRandom
import Network.AWS.KMS.GetKeyPolicy
import Network.AWS.KMS.GetKeyRotationStatus
import Network.AWS.KMS.ListAliases
import Network.AWS.KMS.ListGrants
import Network.AWS.KMS.ListKeyPolicies
import Network.AWS.KMS.ListKeys
import Network.AWS.KMS.PutKeyPolicy
import Network.AWS.KMS.ReEncrypt
import Network.AWS.KMS.RetireGrant
import Network.AWS.KMS.RevokeGrant
import Network.AWS.KMS.Types
import Network.AWS.KMS.UpdateKeyDescription
| kim/amazonka | amazonka-kms/gen/Network/AWS/KMS.hs | mpl-2.0 | 3,200 | 0 | 5 | 473 | 417 | 310 | 107 | 53 | 0 |
import Data.List.Split
import Midi
-- | Write the demo song to \"test3.mid\".  NOTE(review): the meaning of
-- the two numeric arguments is defined by 'write_music' in the Midi
-- module -- confirm against its signature.
main = write_music "test3.mid" 1 1 song

-- Two voices playing simultaneously.
song = Parallel [under, over]

-- Accompaniment: two octaves down and four times longer; the chord is
-- first arpeggiated, then sustained as a block chord.
under = Higher (-24) $ Longer 4 $ Sequence [ Sequence chord, Longer 4 (Parallel chord) ]

-- Melody: the first eight notes of the major scale, up and back down.
over = Sequence (let x = take 8 major in x ++ reverse x)

-- A four-note chord from every other note of the major scale.
chord = take 4 $ every 2 major

-- Infinite ascending scale starting at A, shifted up an octave on each
-- repetition.
minor = concat $ iterate (map oct) [A,B,C,D,E,F,G]

-- Major scale: the minor scale skipped forward by two scale degrees.
major = drop 2 $ minor

-- Transpose up one octave (12 semitones).
oct = Higher 12
every n = map head . splitEvery n
| sordina/Midi | test/test3.hs | bsd-3-clause | 426 | 0 | 11 | 101 | 215 | 110 | 105 | 11 | 1 |
{-# OPTIONS #-}
-----------------------------------------------------------------------------
-- |
-- Module : Language.Python.Common.LexerUtils
-- Copyright : (c) 2009 Bernie Pope
-- License : BSD-style
-- Maintainer : bjpop@csse.unimelb.edu.au
-- Stability : experimental
-- Portability : ghc
--
-- Various utilities to support the Python lexer.
-----------------------------------------------------------------------------
module Language.Python.Common.LexerUtils where
import Control.Monad (liftM)
import Control.Monad.Error.Class (throwError)
import Data.List (foldl')
import Data.Map as Map hiding (null, map, foldl')
import Data.Word (Word8)
import Data.Char (ord)
import Numeric (readHex, readOct)
import Language.Python.Common.Token as Token
import Language.Python.Common.ParserMonad hiding (location)
import Language.Python.Common.SrcLocation
import Codec.Binary.UTF8.String as UTF8 (encode)
type Byte = Word8
-- Beginning of. BOF = beginning of file, BOL = beginning of line
data BO = BOF | BOL
-- Functions for building tokens
type StartCode = Int
type Action = SrcSpan -> Int -> String -> P Token
-- | Emit a token for an explicit line join (backslash-newline),
-- anchored at the start of the matched span.
lineJoin :: Action
lineJoin span _len _str =
   return $ LineJoinToken $ spanStartPoint span
-- | Remember where this end-of-line was seen (consumed later by
-- 'newlineToken') and continue lexing without emitting a token here.
endOfLine :: P Token -> Action
endOfLine lexToken span _len _str = do
   setLastEOL $ spanStartPoint span
   lexToken
-- | Like 'endOfLine', but first push the given beginning-of-line start
-- code so indentation is checked when the next line begins.
bolEndOfLine :: P Token -> Int -> Action
bolEndOfLine lexToken bol span len inp = do
   pushStartCode bol
   endOfLine lexToken span len inp
-- | Emit one dedent token per indentation level closed by the current
-- line.  The lexer stays in the dedent start code until the current
-- column matches the top of the indent stack, so several dedents can be
-- produced for one physical line.
dedentation :: P Token -> Action
dedentation lexToken span _len _str = do
   topIndent <- getIndent
   -- Compare the column where this token starts with the innermost
   -- recorded indentation level.
   -- case compare (endCol span) topIndent of
   case compare (startCol span) topIndent of
      -- Back in sync: leave the dedent start code and lex normally.
      EQ -> do popStartCode
               lexToken
      -- Still shallower than the stack top: close one level.
      LT -> do popIndent
               return dedentToken
      GT -> spanError span "indentation error"
-- | Handle the start of a new logical line: decide whether to emit an
-- indent, start dedenting, emit a newline, or just keep lexing.
-- Indentation is ignored entirely while inside parentheses/brackets.
indentation :: P Token -> Int -> BO -> Action
-- Check if we are at the EOF. If yes, we may need to generate a newline,
-- in case we came here from BOL (but not BOF).
indentation lexToken _dedentCode bo _loc _len [] = do
   popStartCode
   case bo of
      BOF -> lexToken
      BOL -> newlineToken
indentation lexToken dedentCode bo span _len _str = do
   popStartCode
   parenDepth <- getParenStackDepth
   -- Inside brackets, line structure is irrelevant (implicit joining).
   if parenDepth > 0
      then lexToken
      else do
         topIndent <- getIndent
         -- case compare (endCol span) topIndent of
         case compare (startCol span) topIndent of
            -- Same level: no indent change; a newline separates logical
            -- lines unless this is the very start of the file.
            EQ -> case bo of
                     BOF -> lexToken
                     BOL -> newlineToken
            -- Shallower: switch to the dedent start code, which emits
            -- dedent tokens until the stack matches ('dedentation').
            LT -> do pushStartCode dedentCode
                     newlineToken
            -- GT -> do pushIndent (endCol span)
            -- Deeper: open a new indentation level.
            GT -> do pushIndent (startCol span)
                     return indentToken
   where
     indentToken = IndentToken span
symbolToken :: (SrcSpan -> Token) -> Action
symbolToken mkToken location _ _ = return (mkToken location)
token :: (SrcSpan -> String -> a -> Token) -> (String -> a) -> Action
token mkToken read location len str
= return $ mkToken location literal (read literal)
where
literal = take len str
-- special tokens for the end of file and end of line
endOfFileToken :: Token
endOfFileToken = EOFToken SpanEmpty
dedentToken = DedentToken SpanEmpty
newlineToken :: P Token
newlineToken = do
loc <- getLastEOL
return $ NewlineToken loc
-- Test if we are at the end of the line or file
-- | Alex right-context predicate: true when the input after the token
-- is exhausted or the next character begins a line terminator.
-- 'head' is safe because the null check short-circuits first.
atEOLorEOF :: a -> AlexInput -> Int -> AlexInput -> Bool
atEOLorEOF _user _inputBeforeToken _tokenLength (_loc, _bs, inputAfterToken)
   = null inputAfterToken || nextChar == '\n' || nextChar == '\r'
   where
   nextChar = head inputAfterToken
-- | Alex predicate: true while at least one character of input remains.
notEOF :: a -> AlexInput -> Int -> AlexInput -> Bool
notEOF _user _inputBeforeToken _tokenLength (_loc, _bs, inputAfterToken)
   = not (null inputAfterToken)
-- | Interpret a binary integer literal of the form \"0b...\": the
-- two-character prefix is skipped unconditionally and the remaining
-- characters are folded most-significant-bit first.  Characters other
-- than '0'/'1' are a pattern-match failure, exactly as before.
readBinary :: String -> Integer
readBinary literal = foldl' step 0 (drop 2 literal)
  where
    step total '0' = 2 * total
    step total '1' = 2 * total + 1
-- | Parse a Python float literal, tolerating a leading or trailing dot:
-- \".5\" is read as \"0.5\" and \"3.\" as \"3.0\".  The trailing-dot
-- normalisation mirrors 'readFloatRest' but is inlined here.
readFloat :: String -> Double
readFloat str = read (leadingZero ++ padTrailing str)
  where
    leadingZero = case str of
                    '.' : _ -> "0"
                    _       -> ""
    padTrailing []       = []
    padTrailing ['.']    = ".0"
    padTrailing (c : cs) = c : padTrailing cs
-- | Append a zero to a float literal that ends in a bare dot so 'read'
-- accepts it (\"3.\" becomes \"3.0\"); any other input is unchanged.
readFloatRest :: String -> String
readFloatRest s =
  case s of
    []     -> []
    ['.']  -> ".0"
    c : cs -> c : readFloatRest cs
mkString :: (SrcSpan -> String -> Token) -> Action
mkString toToken loc len str = do
return $ toToken loc (take len str)
stringToken :: SrcSpan -> String -> Token
stringToken = StringToken
rawStringToken :: SrcSpan -> String -> Token
rawStringToken = StringToken
byteStringToken :: SrcSpan -> String -> Token
byteStringToken = ByteStringToken
unicodeStringToken :: SrcSpan -> String -> Token
unicodeStringToken = UnicodeStringToken
rawByteStringToken :: SrcSpan -> String -> Token
rawByteStringToken = ByteStringToken
openParen :: (SrcSpan -> Token) -> Action
openParen mkToken loc _len _str = do
let token = mkToken loc
pushParen token
return token
closeParen :: (SrcSpan -> Token) -> Action
closeParen mkToken loc _len _str = do
let token = mkToken loc
topParen <- getParen
case topParen of
Nothing -> spanError loc err1
Just open -> if matchParen open token
then popParen >> return token
else spanError loc err2
where
-- XXX fix these error messages
err1 = "Lexical error ! unmatched closing paren"
err2 = "Lexical error ! unmatched closing paren"
-- | True when the closing bracket token matches the kind of the opening
-- one: ( with ), { with }, [ with ].
matchParen :: Token -> Token -> Bool
matchParen (LeftRoundBracketToken {}) (RightRoundBracketToken {}) = True
matchParen (LeftBraceToken {}) (RightBraceToken {}) = True
matchParen (LeftSquareBracketToken {}) (RightSquareBracketToken {}) = True
matchParen _ _ = False
-- -----------------------------------------------------------------------------
-- Functionality required by Alex
type AlexInput = (SrcLocation, -- current src location
[Byte], -- byte buffer for next character
String) -- input string
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar _ = error "alexInputPrevChar not used"
-- byte buffer should be empty here
alexGetChar :: AlexInput -> Maybe (Char, AlexInput)
alexGetChar (loc, [], input)
| null input = Nothing
| otherwise = seq nextLoc (Just (nextChar, (nextLoc, [], rest)))
where
nextChar = head input
rest = tail input
nextLoc = moveChar nextChar loc
alexGetChar (loc, _:_, _) = error "alexGetChar called with non-empty byte buffer"
-- mapFst :: (a -> b) -> (a, c) -> (b, c)
-- mapFst f (a, c) = (f a, c)
alexGetByte :: AlexInput -> Maybe (Byte, AlexInput)
-- alexGetByte = fmap (mapFst (fromIntegral . ord)) . alexGetChar
alexGetByte (loc, b:bs, input) = Just (b, (loc, bs, input))
alexGetByte (loc, [], []) = Nothing
alexGetByte (loc, [], nextChar:rest) =
seq nextLoc (Just (byte, (nextLoc, restBytes, rest)))
where
nextLoc = moveChar nextChar loc
byte:restBytes = UTF8.encode [nextChar]
-- | Advance a source location over one character: newline bumps the
-- line counter, tab advances via 'incTab' (tab-stop handling lives in
-- the ParserMonad module), carriage return is ignored, and anything
-- else moves one column.
moveChar :: Char -> SrcLocation -> SrcLocation
moveChar '\n' = incLine 1
moveChar '\t' = incTab
moveChar '\r' = id
moveChar _ = incColumn 1
-- | Abort lexing with an 'UnexpectedChar' error at the current
-- location.  NOTE(review): 'head' assumes the remaining input is
-- non-empty at every call site -- confirm before reuse.
lexicalError :: P a
lexicalError = do
  location <- getLocation
  c <- liftM head getInput
  throwError $ UnexpectedChar c location
-- | Read an octal literal written with a bare leading zero (Python 2
-- style, e.g. \"0777\") by rewriting it into Haskell's \"0O\" notation
-- before calling 'read'.  Still partial on the empty string, as before.
readOctNoO :: String -> Integer
readOctNoO (leadingZero : octalDigits) = read (leadingZero : 'O' : octalDigits)
| jml/language-python | src/Language/Python/Common/LexerUtils.hs | bsd-3-clause | 7,246 | 0 | 17 | 1,596 | 1,955 | 1,030 | 925 | 158 | 6 |
{-# OPTIONS_HADDOCK hide #-}
{-# LANGUAGE TemplateHaskell, OverloadedStrings #-}
module SecondTransfer.Http1.Parse(
newIncrementalHttp1Parser
,addBytes
-- Internal exports, used by the test suite
,locateCRLFs
,splitByColon
,stripBs
,headerListToHTTP1RequestText
,headerListToHTTP1ResponseText
,serializeHTTPResponse
,methodHasRequestBody
,methodHasRequestBody'
,methodHasResponseBody
,chunkParser
,transferEncodingIsChunked
,wrapChunk
,unwrapChunks
,leftoversFromParserCompletion
,responseStatusHasResponseBody
,IncrementalHttp1Parser
,Http1ParserCompletion(..)
,BodyStopCondition(..)
) where
import Control.Exception (throw)
import Control.Lens
import qualified Control.Lens as L
import Control.Applicative
--import Control.DeepSeq (deepseq)
import GHC.Stack
import Numeric as Nm
import qualified Data.ByteString as B
import Data.List (foldl')
import qualified Data.ByteString.Builder as Bu
import Data.ByteString.Char8 (pack, unpack)
import qualified Data.ByteString.Char8 as Ch8
import qualified Data.ByteString.Lazy as Lb
import qualified Data.ByteString.Lazy as LB
import Data.Char (toLower, isSpace)
import Data.Maybe (isJust, fromMaybe)
import GHC.Stack
import qualified Data.Attoparsec.ByteString as Ap
import qualified Data.Attoparsec.ByteString.Char8 as Ap8
import Data.Foldable (find)
import Data.Word (Word8)
import qualified Data.Map as M
import Data.Conduit
import Text.Read (readEither)
import qualified Network.URI as U
import SimpleHttpHeadersHq
import qualified SecondTransfer.Utils.HTTPHeaders as He
import SecondTransfer.Exception
import SecondTransfer.Utils (subByteString)
import qualified SecondTransfer.ConstantsAndLimits as Constant
-- import Debug.Trace
data IncrementalHttp1Parser = IncrementalHttp1Parser {
_fullText :: Bu.Builder
,_stateParser :: HeaderParseClosure
}
type HeaderParseClosure = (LB.ByteString -> ([Int], Int, Word8))
-- L.makeLenses ''IncrementalHttp1Parser
instance Show IncrementalHttp1Parser where
show (IncrementalHttp1Parser ft _sp ) = show $ Bu.toLazyByteString ft
newIncrementalHttp1Parser :: IncrementalHttp1Parser
newIncrementalHttp1Parser = IncrementalHttp1Parser {
_fullText = mempty
,_stateParser = locateCRLFs 0 [] 0
}
-- | Was the parser complete?
data Http1ParserCompletion =
-- | No, not even headers are done. Use the returned
-- value to continue
MustContinue_H1PC !IncrementalHttp1Parser
-- | Headers were completed. For some HTTP methods that's all
-- there is, and that's what this case represents. The second
-- argument is a left-overs string, that should be completed
-- with any other data required
|OnlyHeaders_H1PC !HqHeaders !B.ByteString
-- | For requests with a body. The second argument is a condition
-- to stop receiving the body, the third is leftovers from
-- parsing the headers.
|HeadersAndBody_H1PC !HqHeaders !BodyStopCondition !B.ByteString
-- | Some requests are ill-formed. We can check those cases
-- here.
|RequestIsMalformed_H1PC String
deriving Show
leftoversFromParserCompletion :: Http1ParserCompletion -> B.ByteString
leftoversFromParserCompletion (OnlyHeaders_H1PC _ l) = l
leftoversFromParserCompletion (HeadersAndBody_H1PC _ _ l) = l
leftoversFromParserCompletion _ = mempty
-- | Stop condition when parsing the body.
-- Tons and tons of messages in the internet go without a Content-Length
-- header, in those cases there is a long chain of conditions to determine the
-- message length, and at the end of those, there is CloseConnection
--
data BodyStopCondition =
UseBodyLength_BSC Int -- ^ There is a content-length header, and a length
| ConnectionClosedByPeer_BSC -- ^ We expect the connection to be closed by the peer when the stream finishes
| Chunked_BSC -- ^ It's a chunked transfer, use the corresponding parser
| SemanticAbort_BSC -- ^ Terrible things have happened, close the connection
deriving (Show, Eq)
-- | What can we parse from the first line?
data FirstLineDatum =
-- | First argument is the URI, second the method
Request_RoRL B.ByteString B.ByteString
-- | First argument is the status code
|Response_RoRL Int
-- | First line is just part of the mime message , this is used
-- by HTTP/1.1. First argument is the header "name", second
-- is the header value.
|NormalMime_RoRL B.ByteString B.ByteString
deriving (Show, Eq)
addBytes :: IncrementalHttp1Parser -> LB.ByteString -> Http1ParserCompletion
addBytes (IncrementalHttp1Parser full_text header_parse_closure) new_bytes =
let -- Just feed the bytes
(positions, length_so_far, last_char ) = header_parse_closure new_bytes
new_full_text = full_text `mappend` (Bu.lazyByteString new_bytes)
could_finish = twoCRLFsAreConsecutive positions
total_length_now = fromIntegral (LB.length new_bytes) + length_so_far
full_text_lbs = (Bu.toLazyByteString new_full_text)
-- This will only trigger for ill-formed heads, if the head is parsed successfully, this
-- flag will be ignored.
head_is_suspicious =
if total_length_now > 399 then
if total_length_now < Constant.maxUrlLength
then looksSuspicious full_text_lbs
else True
else False
in
case (could_finish, head_is_suspicious) of
(Just at_position, _) -> elaborateHeaders new_full_text positions at_position
(Nothing, True ) -> RequestIsMalformed_H1PC "Head is suspicious"
(Nothing, False) -> MustContinue_H1PC
$ IncrementalHttp1Parser
new_full_text
(locateCRLFs length_so_far positions last_char)
-- Look for suspicious patterns in the bs, like tab characters, or \r or \n
-- which are alone
looksSuspicious :: Lb.ByteString -> Bool
looksSuspicious bs =
let
have_weird_characters = isJust $ Lb.find (\w8 -> w8 < 32 && w8 /= 10 && w8 /= 13 ) bs
have_lone_n = let
ei = Lb.elemIndices 13 bs
eii = Lb.elemIndices 10 bs
zp = zip ei eii
f ((i,j):rest) | i+1 == j = f rest
| i == Lb.length bs - 1 = False
| otherwise = True
f [] = False
in f zp || abs ( length ei - length eii) > 1
result = have_lone_n || have_weird_characters
in result
-- This function takes care of retrieving headers....
elaborateHeaders :: HasCallStack => Bu.Builder -> [Int] -> Int -> Http1ParserCompletion
elaborateHeaders full_text crlf_positions last_headers_position =
let
-- Start by getting a full byte-string representation of the headers,
-- no need to be silly with chunks.
full_headers_text = Lb.toStrict $ Bu.toLazyByteString full_text
-- Filter out CRLF pairs corresponding to multiline headers.
no_cont_positions_reverse =
filter
(\ pos -> if pos == last_headers_position then True else
if pos > last_headers_position then False else
not . isWsCh8 $
(Ch8.index
full_headers_text
(pos + 2)
)
)
crlf_positions
no_cont_positions = reverse no_cont_positions_reverse
-- Now get the headers as slices from the original string.
headers_pre :: [B.ByteString]
headers_pre = map
(\ (start, stop) ->
subByteString start stop full_headers_text
)
(zip
((:)
0
(map
( + 2 )
(init no_cont_positions)
)
)
no_cont_positions
)
no_empty_headers ::[B.ByteString]
no_empty_headers = filter (\x -> B.length x > 0) headers_pre
-- We remove the first "header" because it is actually the
-- initial HTTP request/response line
headers_0 = map splitByColon $ tail no_empty_headers
-- The first line is not actually a header, but contains the method, the version
-- and the URI
maybe_request_or_response = parseFirstLine (head headers_pre)
headers_1 = [
( (stripBsHName . bsToLower $ hn), stripBs hv ) | (hn, hv) <- headers_0
]
Just request_or_response = maybe_request_or_response
(headers_2, has_body) = case request_or_response of
Request_RoRL uri method ->
let
-- No lowercase, methods are case sensitive
-- lc_method = bsToLower method
--
-- TODO: There is a bug here, according to Section 3.3 of RFC 7230
has_body' = methodHasRequestBody method
in
-- TODO: We should probably add the "scheme" pseudo header here
( (":path", uri):(":method",method):headers_1, has_body' )
Response_RoRL status ->
let
status_str = pack . show $ status
excludes_body =
( (Ch8.head status_str) == '1')
||
( status == 204 || status == 304)
in
((":status", status_str): headers_1, not excludes_body)
NormalMime_RoRL hn hv ->
let
headers_interin =
(bsToLower hn, hv):headers_1
(status_str, hh) = case lookup "status" headers_interin of
Nothing -> ("200", headers_interin)
Just x -> (x,
filter
(
\(hhn, _hv) -> hhn /= "status")
headers_interin
)
excludes_body =
( (Ch8.head status_str) == '1')
||
( status_str == "204" || status_str == "304" )
in ((":status", takeFirstPartOfStatus status_str): hh, not excludes_body)
-- Still we need to lower-case header names, and trim them
headers_3 = [
( (stripBsHName . bsToLower $ hn), stripBs hv ) | (hn, hv) <- headers_2
]
-- TODO: Find out what to do with header parse errors
(headers_hq, _parse_error_list) = parseFromTupleList headers_3
content_stop :: BodyStopCondition
content_stop =
let
cnt_length_header = find (\ x -> (fst x) == "content-length" ) headers_3
transfer_encoding = find (\ x -> (fst x) == "transfer-encoding" ) headers_3
in
case transfer_encoding of
Nothing ->
case cnt_length_header of
Just (_, hv) -> case readEither . unpack $ hv of
Left _ -> SemanticAbort_BSC
Right n -> UseBodyLength_BSC n
Nothing -> ConnectionClosedByPeer_BSC
Just (_, tre_value)
| transferEncodingIsChunked tre_value ->
Chunked_BSC
| otherwise ->
SemanticAbort_BSC
leftovers = B.drop (last_headers_position + 4) full_headers_text
all_headers_ok = all verifyHeaderSyntax headers_1
in
if isJust maybe_request_or_response then
(if all_headers_ok then
if has_body
then
HeadersAndBody_H1PC headers_hq content_stop leftovers
else
OnlyHeaders_H1PC headers_hq leftovers
else
RequestIsMalformed_H1PC "InvalidSyntaxOnHeaders")
else
RequestIsMalformed_H1PC "InvalidFirstLineOnRequest"
-- | Split a raw header line at the first colon; the colon itself is
-- dropped from the value half.  If no colon is present, 'B.tail' on the
-- empty remainder raises, matching the original lens-based version
-- (@over _2 B.tail . break (== ':')@).
splitByColon :: HasCallStack => B.ByteString -> (B.ByteString, B.ByteString)
splitByColon headerLine =
  let (name, colonAndValue) = Ch8.break (== ':') headerLine
  in (name, B.tail colonAndValue)
-- | True iff the Transfer-Encoding header value is exactly \"chunked\".
-- The comparison is exact: no trimming or case folding happens here.
transferEncodingIsChunked :: B.ByteString -> Bool
transferEncodingIsChunked value = value == Ch8.pack "chunked"
-- | Check that every byte of a header name is an ASCII letter, digit,
-- or dash, plus the local tooling extensions '+' and '_'.  Equivalent
-- to the original Word8 range checks (48-57, 65-90, 97-122, 43, 95, 45).
verifyHeaderName :: B.ByteString -> Bool
verifyHeaderName = Ch8.all allowed
  where
    allowed c =
         isAsciiUpper c
      || isAsciiLower c
      || isDigit c
      || c == '+'   -- extension for our local tooling
      || c == '_'   -- extension for our local tooling
      || c == '-'   -- standard dash
-- | Check that every byte of a header value is printable ASCII
-- (0x20..0x7E); control bytes and non-ASCII bytes are rejected.
verifyHeaderValue :: B.ByteString -> Bool
verifyHeaderValue = B.all printableAscii
  where
    printableAscii w8 = 32 <= w8 && w8 <= 126
-- | A header pair is well-formed when its name is a valid token
-- ('verifyHeaderName') and its value is printable ASCII
-- ('verifyHeaderValue').
verifyHeaderSyntax :: (B.ByteString, B.ByteString) -> Bool
verifyHeaderSyntax (a,b) = verifyHeaderName a && verifyHeaderValue b
-- | Run the first-line parser (request line, status line, or plain MIME
-- header) over the whole input; the 'Ap.endOfInput' guard makes any
-- trailing garbage a parse failure, reported as 'Nothing'.
parseFirstLine :: B.ByteString -> Maybe FirstLineDatum
parseFirstLine s =
  let
    either_error_or_rrl = Ap.parseOnly (httpFirstLine <* Ap.endOfInput ) s
  in
    case either_error_or_rrl of
        Left _ -> Nothing
        Right rrl -> Just rrl
-- | Lower-case a ByteString bytewise, treating each byte as a Char.
bsToLower :: B.ByteString -> B.ByteString
bsToLower = Ch8.map toLower
-- | Trim leading and trailing HTTP whitespace (space and tab) from a
-- header fragment.  Replaces the original iterate/unsnoc construction
-- with a dropWhile/spanEnd pipeline; behaviour is unchanged, including
-- on empty and all-whitespace inputs (both yield the empty string).
stripBs :: B.ByteString -> B.ByteString
stripBs = fst . Ch8.spanEnd isHttpWs . Ch8.dropWhile isHttpWs
  where
    isHttpWs c = c == ' ' || c == '\t'
-- | Trim leading whitespace (space and tab) from a header name;
-- trailing bytes are left untouched.
stripBsHName :: B.ByteString -> B.ByteString
stripBsHName = Ch8.dropWhile (\c -> c == ' ' || c == '\t')
locateCRLFs :: Int -> [Int] -> Word8 -> LB.ByteString -> ([Int], Int, Word8)
locateCRLFs initial_offset other_positions prev_last_char next_chunk =
let
(last_char, positions_list, strlen) =
LB.foldl
(\ (prev_char, lst, i) w8 ->
let
j = i + 1
in case (prev_char, w8) of
(13,10) -> (w8, (i-1):lst, j)
_ -> (w8, lst, j)
)
(prev_last_char, other_positions, initial_offset)
next_chunk
in (positions_list, strlen, last_char)
-- Parses the given list of positions, which is a reversed list. If
-- we find that the two latest positions of CRLF are consecutive,
-- then we are ok. and return it
-- | Given CRLF positions in reverse order (most recent first), return
-- the position of the earliest CRLF that is immediately followed by a
-- second CRLF -- i.e. where the blank line ending the header block
-- begins -- or Nothing when no two CRLFs are adjacent.
twoCRLFsAreConsecutive :: [Int] -> Maybe Int
twoCRLFsAreConsecutive = walk Nothing
  where
    -- Walks from newest to oldest, remembering the last adjacent pair
    -- seen; because the list is reversed, that is the earliest in text.
    walk found (later : earlier : rest)
      | later - earlier == 2 = walk (Just earlier) (earlier : rest)
      | otherwise            = walk found (earlier : rest)
    walk found _ = found
-- | HTTP linear whitespace test for a 'Char': space or horizontal tab.
isWsCh8 :: Char -> Bool
isWsCh8 ch = ch == ' ' || ch == '\t'
-- | HTTP linear whitespace test for a raw byte: 0x20 (space) or
-- 0x09 (horizontal tab).
isWs :: Word8 -> Bool
isWs = (`elem` [32, 9])
http1Token :: Ap.Parser B.ByteString
http1Token = Ap.string "HTTP/1.1" <|> Ap.string "HTTP/1.0"
http1Method :: Ap.Parser B.ByteString
http1Method =
Ap.string "GET"
<|> Ap.string "POST"
<|> Ap.string "HEAD"
<|> Ap.string "PUT"
<|> Ap.string "OPTIONS"
<|> Ap.string "TRACE"
<|> Ap.string "CONNECT"
unspacedUri :: Ap.Parser B.ByteString
unspacedUri = Ap.takeWhile (not . isWs)
space :: Ap.Parser Word8
space = Ap.word8 32
requestLine :: Ap.Parser FirstLineDatum
requestLine =
flip Request_RoRL
<$>
http1Method
<* space
<*>
unspacedUri
<* space
<* http1Token
digit :: Ap.Parser Word8
digit = Ap.satisfy (Ap.inClass "0-9")
safeStringToInt :: HasCallStack => String -> Int
safeStringToInt s =
case readEither s of
Left _ -> throw (HTTP11SyntaxException $ "BadDigits found via stack: " ++ prettyCallStack callStack)
Right n -> n
responseLine :: HasCallStack => Ap.Parser FirstLineDatum
responseLine =
(pure Response_RoRL)
<*
http1Token
<*
space
<*>
( safeStringToInt . map (toEnum . fromIntegral ) <$> Ap.count 3 digit )
<*
space
<*
Ap.takeByteString
classStuff :: String
classStuff =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-"
-- Another type of first line
normalMimeLine :: Ap.Parser FirstLineDatum
normalMimeLine =
(pure NormalMime_RoRL )
<*
(Ap.many' space)
<*>
Ap.takeWhile1 ( Ap.inClass classStuff )
<*
(Ap.many' space)
<*
Ap8.char ':'
<*
(Ap.many' space)
<*>
(fst . Ch8.spanEnd isSpace <$> Ap8.takeByteString )
-- | Keep only the leading ASCII digits of a status value, dropping any
-- reason phrase (e.g. \"200 OK\" becomes \"200\").
takeFirstPartOfStatus :: B.ByteString -> B.ByteString
takeFirstPartOfStatus = Ch8.takeWhile isDigit
httpFirstLine :: Ap.Parser FirstLineDatum
httpFirstLine = requestLine <|> responseLine <|> normalMimeLine
-- A parser for chunked messages ....
chunkParser :: Ap.Parser B.ByteString
chunkParser = do
lng_bs <- Ap8.hexadecimal :: Ap.Parser Int
Ap.option () (
do
_ <- Ap.sepBy (Ap8.many1 $ Ap8.satisfy (Ap8.notInClass ";\r\n") ) (Ap8.char ';')
return ()
)
_ <- Ap8.char '\r'
_ <- Ap8.char '\n'
cnt <- Ap.take lng_bs
_ <- Ap8.char '\r'
_ <- Ap8.char '\n'
return cnt
-- | Encode one payload as an HTTP/1.1 chunk: the payload length in
-- lowercase hexadecimal, CRLF, the payload bytes, CRLF.
wrapChunk :: B.ByteString -> Lb.ByteString
wrapChunk payload = Bu.toLazyByteString $ mconcat
    [ Bu.string7 (showHex (B.length payload) "")
    , crlf
    , Bu.byteString payload
    , crlf
    ]
  where
    crlf = Bu.byteString (Ch8.pack "\r\n")
unwrapChunks :: Monad m => Conduit B.ByteString m B.ByteString
unwrapChunks =
do
-- Leftovers are fed and they will be read here.
input <- await
case {- trace ("CHNK iNPUT:" ++ show input) -} input of
Nothing -> return ()
Just bs ->
let
parse_result = Ap.parse chunkParser bs
in onresult parse_result
where
onresult parse_result =
case parse_result of
Ap.Fail _ _ _ -> throw $ HTTP11SyntaxException "ChunkedParsingFailed"
Ap.Partial fn -> go fn
Ap.Done leftovers payload -> do
payload `seq` yield payload
if (B.length payload > 0)
then
restart leftovers
else
leftover leftovers
go fn = do
input <- await
case {- trace ( "CHNK Input: " ++ show input) $ -} input of
Nothing -> do
-- Due to buggy pears, we have to be more accepting
-- throw $ HTTP11SyntaxException "ChunkedParsingLeftUnfinished"
yield ""
return ()
Just bs ->
let
parse_result = fn bs
in onresult parse_result
restart leftovers =
let
parse_result = Ap.parse chunkParser leftovers
in onresult parse_result
-- | This is a serialization function: it goes from content to string
-- It is not using during parse, but during the inverse process.
-- This function adds a single \r\n at the end of the output
headerListToHTTP1ResponseText :: HasCallStack => HqHeaders -> Bu.Builder
headerListToHTTP1ResponseText headers =
case headers ^. serialized_HqH of
-- According to the specs, :status can be only
-- the first header
(hn,hv): rest | hn == ":status" ->
(
(first_line . safeStringToInt . unpack $ hv)
`mappend`
(go rest)
)
rest ->
(
(first_line 200)
`mappend`
(go rest)
)
where
go [] = mempty
go ((hn,hv):rest) =
(Bu.byteString hn) `mappend` ":" `mappend` " " `mappend` (Bu.byteString hv)
`mappend` "\r\n" `mappend` (go rest)
first_line :: Int -> Bu.Builder
first_line code = mconcat [
(Bu.byteString "HTTP/1.1"), " ",
(Bu.string7 . show $ code), " ",
(M.findWithDefault "OK" code httpStatusTable),
"\r\n"
]
-- | Converts a list of headers to a request head.
-- Invoke with the request data. Don't forget to clean the headers first.
-- NOTICE that this function doesn't add the \r\n extra-token for the empty
-- line at the end of headers.
headerListToHTTP1RequestText :: HqHeaders -> Bu.Builder
headerListToHTTP1RequestText headers =
expressAsHTTP1RequestHeaderBlock headers'
where
host_header = case headers ^. authority_Hi of
Nothing -> headers ^. host_Hi
a@(Just some_auth) -> a
headers' =
(set host_Hi host_header) .
(set authority_Hi Nothing) $
headers
-- -- | Converts a list of headers to a request head.
-- -- Invoke with the request data. Don't forget to clean the headers first.
-- -- NOTICE that this function doesn't add the \r\n extra-token for the empty
-- -- line at the end of headers.
-- headerListToHTTP1RequestText :: HqHeaders -> Bu.Builder
-- headerListToHTTP1RequestText headers =
-- go1 Nothing Nothing mempty (headers ^. serialized_HqH)
-- where
-- go1 mb_method mb_local_uri assembled_body [] =
-- (fromMaybe "GET" mb_method) `mappend` " " `mappend` (fromMaybe "*" mb_local_uri) `mappend` " " `mappend` "HTTP/1.1" `mappend` "\r\n"
-- `mappend` assembled_body
-- go1 _ mb_local_uri assembled_body ((hn,hv): rest)
-- | hn == ":method" =
-- go1 (Just . Bu.byteString . validMethod $ hv) mb_local_uri assembled_body rest
-- go1 mb_method _mb_local_uri assembled_body ((hn,hv): rest)
-- | hn == ":path" =
-- go1 mb_method (Just . Bu.byteString . cleanupAbsoluteUri $ hv) assembled_body rest
-- -- Authority pseudo-header becomes a host header.
-- go1 mb_method _mb_local_uri assembled_body ((hn,hv): rest)
-- | hn == ":authority" =
-- go1 mb_method (Just . Bu.byteString . cleanupAbsoluteUri $ hv) (assembled_body `mappend` "host" `mappend` ":" `mappend` (Bu.byteString hv) `mappend` "\r\n") rest
-- go1 mb_method mb_local_uri assembled_body ((hn,hv):rest) -- Ignore any strange pseudo-headers
-- | He.headerIsPseudo hn = go1 mb_method mb_local_uri assembled_body rest
-- | otherwise = go1 mb_method mb_local_uri (assembled_body `mappend` (Bu.byteString hn) `mappend` ":" `mappend` (Bu.byteString hv) `mappend` "\r\n") rest
-- | Function used for testing....
serializeHTTPResponse :: HqHeaders -> [B.ByteString] -> Lb.ByteString
serializeHTTPResponse response_headers fragments =
let
-- So got some data in an answer. Now there are three ways to go about
-- the returned data: to force a chunked transfer-encoding, to read all
-- the data and add/set the Content-Length header, or to let the user
-- decide which one she prefers.
--
-- Right now I'm going for the second one, until somebody complains
-- This is equivalent to a lazy byte-string...but I just need the
-- length
-- I promised to minimize the number of interventions of the library,
-- so it could be a good idea to remove this one further down the
-- road.
data_size = foldl' (\ n bs -> n + B.length bs) 0 fragments
h2 = L.set
contentLength_Hi
(Just . fromIntegral $ data_size )
response_headers
-- Next, I must serialize the headers....
headers_text_as_builder = headerListToHTTP1ResponseText h2
-- We dump the headers first... unfortunately when talking
-- HTTP/1.1 the most efficient way to write those bytes is
-- to create a big buffer and pass it on to OpenSSL.
-- However the Builder generating the headers above says
-- it generates fragments between 4k and 32 kb, I checked it
-- and it is true, so we can use it
-- Now we need to insert an extra \r\n, even it the response is
-- empty
-- And then we use the builder to re-format the fragments returned
-- by the coherent worker
-- TODO: This could be a good place to introduce chunked responses.
body_builder = mconcat $ map Bu.byteString fragments
in Bu.toLazyByteString $ headers_text_as_builder `mappend` "\r\n" `mappend`
body_builder
-- | Whitelist an HTTP method name (case-sensitive); anything outside
-- the seven supported verbs is coerced to \"GET\".
validMethod :: B.ByteString -> B.ByteString
validMethod method
    | method `elem` supported = method
    | otherwise               = Ch8.pack "GET"
  where
    supported = map Ch8.pack
        ["GET", "POST", "HEAD", "OPTIONS", "PUT", "DELETE", "TRACE"]
-- | Of the supported verbs only POST and PUT carry a request body;
-- every other (or unknown) method is treated as body-less, matching the
-- original guard table.
methodHasRequestBody :: B.ByteString -> Bool
methodHasRequestBody method = method `elem` map Ch8.pack ["POST", "PUT"]
-- | Same table as 'methodHasRequestBody' but over the typed
-- 'HttpMethod'.  NOTE(review): the case lists six constructors --
-- confirm that 'HttpMethod' has no others, or add a catch-all.
methodHasRequestBody' :: HttpMethod -> Bool
methodHasRequestBody' mth = case mth of
    Get_HtM -> False
    Post_HtM -> True
    Head_HtM -> False
    Options_HtM -> False
    Put_HtM -> True
    Delete_HtM -> False
-- These are most likely wrong TODO: fix
-- | Whether a response to the given method may carry a body.  The
-- original table marks GET, POST, PUT and DELETE as body-carrying and
-- everything else (HEAD, OPTIONS, TRACE, unknown) as body-less;
-- behaviour is preserved as-is despite the TODO above.
methodHasResponseBody :: B.ByteString -> Bool
methodHasResponseBody method =
    method `elem` map Ch8.pack ["GET", "POST", "PUT", "DELETE"]
-- | Every status code implies a response body except 204 (No Content)
-- and 304 (Not Modified).
responseStatusHasResponseBody :: Int -> Bool
responseStatusHasResponseBody code = code `notElem` [204, 304]
-- | Sanitise a request path coming from the client: anything empty,
-- not starting with '/' (byte 47), or rejected by
-- 'U.isRelativeReference' is replaced by a sentinel path, so a bad URI
-- resolves to a 404 rather than crashing the server.
cleanupAbsoluteUri :: HasCallStack => B.ByteString -> B.ByteString
-- Just trigger a 404 with an informative message (perhaps)
cleanupAbsoluteUri u
    | B.length u == 0
      = "/client-gives-invalid-uri/"
    -- 'B.head' is safe: the empty case was handled by the guard above.
    | B.head u /= 47
      = "/client-gives-invalid-uri/"
    | otherwise
      =
      let
        str = unpack u
        ok = U.isRelativeReference str
      in if ok then u else "/client-gives-invalid-uri/"
httpStatusTable :: M.Map Int Bu.Builder
httpStatusTable = M.fromList
[
(100, "Continue"),
(101, "Switching Protocols"),
(200, "OK"),
(201, "Created"),
(202, "Accepted"),
(203, "Non-Authoritative Information"),
(204, "No Content"),
(205, "Reset Content"),
(206, "Partial Content"),
(300, "Multiple Choices"),
(301, "Moved Permanently"),
(302, "Found"),
(303, "See Other"),
(304, "Not Modified"),
(305, "Use Proxy"),
(307, "Temporary Redirect"),
(400, "Bad Request"),
(401, "Unauthorized"),
(402, "Payment Required"),
(403, "Forbidden"),
(404, "Not Found"),
(405, "Method Not Allowed"),
(406, "Not Acceptable"),
(407, "Proxy Authentication Required"),
(408, "Request Timeout"),
(409, "Conflict"),
(410, "Gone"),
(411, "Length Required"),
(412, "Precondition Failed"),
(413, "Request Entity Too Large"),
(414, "Request-URI Too Long"),
(415, "Unsupported Media Type"),
(416, "Requested Range Not Satisfiable"),
(417, "Expectation Failed"),
(500, "Internal Server Error"),
(501, "Not Implemented"),
(502, "Bad Gateway"),
(503, "Service Unavailable"),
(504, "Gateway Timeout"),
(505, "HTTP Version Not Supported")
]
| shimmercat/second-transfer | hs-src/SecondTransfer/Http1/Parse.hs | bsd-3-clause | 28,177 | 0 | 23 | 9,049 | 5,606 | 3,034 | 2,572 | -1 | -1 |
{-# LANGUAGE FlexibleInstances #-}
module Example(main, platforms) where
import Development.Bake
import Development.Shake.Command
import System.Environment.Extra
import System.FilePath
import Data.List.Extra
import Data.Tuple.Extra
import System.Directory
import Control.Monad.Extra
import Data.Maybe
import System.Time.Extra
-- Selects which oven 'main' builds: the step-based git oven when True,
-- otherwise the incremental git oven.
useStep = True
-- | Platforms a test can be tagged with.
data Platform = Linux | Windows deriving (Show,Read)
-- | Test actions: compile once, or run the built binary with an argument.
data Action = Compile | Run Int deriving (Show,Read)
-- Round-trips a (platform, action) pair through a space-separated string,
-- e.g. "Linux Compile"; relies on the derived Show/Read instances above.
instance Stringy (Platform, Action) where
    stringyTo (a,b) = show a ++ " " ++ show b
    stringyFrom = (read *** read) . word1
-- | All platforms exercised by 'allTests'.
platforms = [Linux,Windows]
main :: IO ()
main = do
    let err = "You need to set an environment variable named $REPO for the Git repo"
    -- The Git repository under test comes from $REPO; abort with a clear
    -- message if it is unset.
    repo <- fromMaybe (error err) `fmap` lookupEnv "REPO"
    -- Assemble the oven from the outside in: pretty output, git checkout
    -- (step-based or incremental, per 'useStep'), stdout notifications,
    -- then the test suite itself, listening on localhost:5000.
    bake $
        ovenPretty $
        (if useStep
            then ovenStepGit compile repo "master" Nothing ["dist"]
            else ovenIncremental . ovenGit repo "master" Nothing) $
        ovenNotifyStdout $
        ovenTest (return allTests) execute
        defaultOven{ovenServer=("127.0.0.1",5000)}
-- Every (platform, action) combination: one compile plus three runs.
allTests = [(p, act) | p <- platforms, act <- actions]
    where actions = Compile : map Run [1, 10, 0]
-- | Compile Main.hs into dist/, returning the directories whose contents
-- constitute the build output (consumed by 'ovenStepGit').
compile :: IO [FilePath]
compile = do
    createDirectoryIfMissing True "dist"
    unit $ cmd "ghc --make Main.hs -o dist/Main"
    -- ghc --make only has 1 second timestamp resolution
    -- so sleep for a second to make sure we work with incremental compilation
    sleep 1
    return ["dist"]
-- | Describe how to run one test. Compile actions rebuild (only in the
-- incremental, non-step configuration, where the oven does not build for
-- us); Run actions depend on the platform's compile and invoke the binary.
execute :: (Platform,Action) -> TestInfo (Platform,Action)
execute (p,Compile) = require [show p] $ run $ unless useStep $ do
    incrementalStart
    -- result of 'compile' (the output dirs) is deliberately discarded here
    compile
    incrementalDone
execute (p,Run i) = depend [(p,Compile)] $ require [show p] $ run $
    cmd ("dist" </> "Main") (show i)
| capital-match/bake | src/Example.hs | bsd-3-clause | 1,741 | 0 | 13 | 368 | 556 | 300 | 256 | 45 | 2 |
-- | The example program from the linux-perf.cabal file.
-- Let's run it once in a while to make sure API changes did not break it.
module Main where
import Profiling.Linux.Perf (readPerfData)
import Profiling.Linux.Perf.Types (PerfData (..))
import System.Environment (getArgs)
-- | Read the perf.data file named on the command line and print how many
-- events it contains; with no arguments, do nothing.
main :: IO ()
main = do
    args <- getArgs
    case args of
        file : _ -> do
            perfData <- readPerfData file
            print (length (perfData_events perfData))
        [] -> return ()
| Mikolaj/haskell-linux-perf | test/count-events.hs | bsd-3-clause | 474 | 0 | 13 | 108 | 122 | 66 | 56 | 12 | 2 |
{-# LANGUAGE Haskell2010 #-}
{-# LINE 1 "System/FilePath/Windows.hs" #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE Safe #-}
{-# LANGUAGE PatternGuards #-}
-- This template expects CPP definitions for:
-- Windows = Posix | Windows
-- True = False | True
-- |
-- Module : System.FilePath.Windows
-- Copyright : (c) Neil Mitchell 2005-2014
-- License : BSD3
--
-- Maintainer : ndmitchell@gmail.com
-- Stability : stable
-- Portability : portable
--
-- A library for 'FilePath' manipulations, using Windows style paths on
-- all platforms. Importing "System.FilePath" is usually better.
--
-- Given the example 'FilePath': @\/directory\/file.ext@
--
-- We can use the following functions to extract pieces.
--
-- * 'takeFileName' gives @\"file.ext\"@
--
-- * 'takeDirectory' gives @\"\/directory\"@
--
-- * 'takeExtension' gives @\".ext\"@
--
-- * 'dropExtension' gives @\"\/directory\/file\"@
--
-- * 'takeBaseName' gives @\"file\"@
--
-- And we could have built an equivalent path with the following expressions:
--
-- * @\"\/directory\" '</>' \"file.ext\"@.
--
-- * @\"\/directory\/file" '<.>' \"ext\"@.
--
-- * @\"\/directory\/file.txt" '-<.>' \"ext\"@.
--
-- Each function in this module is documented with several examples,
-- which are also used as tests.
--
-- Here are a few examples of using the @filepath@ functions together:
--
-- /Example 1:/ Find the possible locations of a Haskell module @Test@ imported from module @Main@:
--
-- @['replaceFileName' path_to_main \"Test\" '<.>' ext | ext <- [\"hs\",\"lhs\"] ]@
--
-- /Example 2:/ Download a file from @url@ and save it to disk:
--
-- @do let file = 'makeValid' url
-- System.IO.createDirectoryIfMissing True ('takeDirectory' file)@
--
-- /Example 3:/ Compile a Haskell file, putting the @.hi@ file under @interface@:
--
-- @'takeDirectory' file '</>' \"interface\" '</>' ('takeFileName' file '-<.>' \"hi\")@
--
-- References:
-- [1] <http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx Naming Files, Paths and Namespaces> (Microsoft MSDN)
module System.FilePath.Windows
(
-- * Separator predicates
FilePath,
pathSeparator, pathSeparators, isPathSeparator,
searchPathSeparator, isSearchPathSeparator,
extSeparator, isExtSeparator,
-- * @$PATH@ methods
splitSearchPath, getSearchPath,
-- * Extension functions
splitExtension,
takeExtension, replaceExtension, (-<.>), dropExtension, addExtension, hasExtension, (<.>),
splitExtensions, dropExtensions, takeExtensions, replaceExtensions,
stripExtension,
-- * Filename\/directory functions
splitFileName,
takeFileName, replaceFileName, dropFileName,
takeBaseName, replaceBaseName,
takeDirectory, replaceDirectory,
combine, (</>),
splitPath, joinPath, splitDirectories,
-- * Drive functions
splitDrive, joinDrive,
takeDrive, hasDrive, dropDrive, isDrive,
-- * Trailing slash functions
hasTrailingPathSeparator,
addTrailingPathSeparator,
dropTrailingPathSeparator,
-- * File name manipulations
normalise, equalFilePath,
makeRelative,
isRelative, isAbsolute,
isValid, makeValid
)
where
import Data.Char(toLower, toUpper, isAsciiLower, isAsciiUpper)
import Data.Maybe(isJust)
import Data.List(stripPrefix)
import System.Environment(getEnv)
infixr 7 <.>, -<.>
infixr 5 </>
---------------------------------------------------------------------
-- Platform Abstraction Methods (private)
-- | Is the operating system Unix or Linux like
isPosix :: Bool
-- Always the negation of 'isWindows'; this template is specialised per style.
isPosix = not isWindows
-- | Is the operating system Windows like
isWindows :: Bool
-- This is the Windows-flavoured expansion of the template (see file header).
isWindows = True
---------------------------------------------------------------------
-- The basic functions
-- | The character that separates directories. In the case where more than
-- one character is possible, 'pathSeparator' is the \'ideal\' one.
--
-- > Windows: pathSeparator == '\\'
-- > Posix: pathSeparator == '/'
-- > isPathSeparator pathSeparator
pathSeparator :: Char
pathSeparator
    | isWindows = '\\'
    | otherwise = '/'
-- | The list of all possible separators.
--
-- > Windows: pathSeparators == ['\\', '/']
-- > Posix: pathSeparators == ['/']
-- > pathSeparator `elem` pathSeparators
pathSeparators :: [Char]
pathSeparators
    | isWindows = "\\/"
    | otherwise = "/"
-- | Rather than using @(== 'pathSeparator')@, use this. Test if something
-- is a path separator.
--
-- > isPathSeparator a == (a `elem` pathSeparators)
isPathSeparator :: Char -> Bool
-- '/' separates everywhere; '\\' only on Windows.
isPathSeparator c = c == '/' || (c == '\\' && isWindows)
-- | The character that is used to separate the entries in the $PATH environment variable.
--
-- > Windows: searchPathSeparator == ';'
-- > Posix: searchPathSeparator == ':'
searchPathSeparator :: Char
searchPathSeparator
    | isWindows = ';'
    | otherwise = ':'
-- | Is the character a file separator?
--
-- > isSearchPathSeparator a == (a == searchPathSeparator)
isSearchPathSeparator :: Char -> Bool
isSearchPathSeparator c = c == searchPathSeparator
-- | File extension character
--
-- > extSeparator == '.'
extSeparator :: Char
-- The dot that separates a file name from its extension.
extSeparator = '.'
-- | Is the character an extension character?
--
-- > isExtSeparator a == (a == extSeparator)
isExtSeparator :: Char -> Bool
isExtSeparator c = c == extSeparator
---------------------------------------------------------------------
-- Path methods (environment $PATH)
-- | Take a string, split it on the 'searchPathSeparator' character.
-- Blank items are ignored on Windows, and converted to @.@ on Posix.
-- On Windows path elements are stripped of quotes.
--
-- Follows the recommendations in
-- <http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html>
--
-- > Posix: splitSearchPath "File1:File2:File3" == ["File1","File2","File3"]
-- > Posix: splitSearchPath "File1::File2:File3" == ["File1",".","File2","File3"]
-- > Windows: splitSearchPath "File1;File2;File3" == ["File1","File2","File3"]
-- > Windows: splitSearchPath "File1;;File2;File3" == ["File1","File2","File3"]
-- > Windows: splitSearchPath "File1;\"File2\";File3" == ["File1","File2","File3"]
splitSearchPath :: String -> [FilePath]
splitSearchPath = f
    where
    -- Peel off one $PATH entry at a time.
    f xs = case break isSearchPathSeparator xs of
           (pre, [] ) -> g pre
           (pre, _:post) -> g pre ++ f post
    -- Interpret one entry: blanks become "." on Posix and vanish on Windows;
    -- a fully-quoted Windows entry is unquoted.
    g "" = ["." | isPosix]
    g ('\"':x@(_:_)) | isWindows && last x == '\"' = [init x]
    g x = [x]
-- | Get a list of 'FilePath's in the $PATH variable.
getSearchPath :: IO [FilePath]
getSearchPath = do
    searchPath <- getEnv "PATH"
    return (splitSearchPath searchPath)
---------------------------------------------------------------------
-- Extension methods
-- | Split on the extension. 'addExtension' is the inverse.
--
-- > splitExtension "/directory/path.ext" == ("/directory/path",".ext")
-- > uncurry (++) (splitExtension x) == x
-- > Valid x => uncurry addExtension (splitExtension x) == x
-- > splitExtension "file.txt" == ("file",".txt")
-- > splitExtension "file" == ("file","")
-- > splitExtension "file/file.txt" == ("file/file",".txt")
-- > splitExtension "file.txt/boris" == ("file.txt/boris","")
-- > splitExtension "file.txt/boris.ext" == ("file.txt/boris",".ext")
-- > splitExtension "file/path.txt.bob.fred" == ("file/path.txt.bob",".fred")
-- > splitExtension "file/path.txt/" == ("file/path.txt/","")
splitExtension :: FilePath -> (String, String)
-- Break the *file* part at the last '.'; the directory part is never scanned.
splitExtension x = case nameDot of
    -- No '.' in the file name: there is no extension.
    "" -> (x,"")
    -- 'nameDot' keeps the separator at its end; 'init' drops it again.
    _ -> (dir ++ init nameDot, extSeparator : ext)
    where
        (dir,file) = splitFileName_ x
        (nameDot,ext) = breakEnd isExtSeparator file
-- | Get the extension of a file, returns @\"\"@ for no extension, @.ext@ otherwise.
--
-- > takeExtension "/directory/path.ext" == ".ext"
-- > takeExtension x == snd (splitExtension x)
-- > Valid x => takeExtension (addExtension x "ext") == ".ext"
-- > Valid x => takeExtension (replaceExtension x "ext") == ".ext"
takeExtension :: FilePath -> String
takeExtension fp = snd (splitExtension fp)
-- | Remove the current extension and add another, equivalent to 'replaceExtension'.
--
-- > "/directory/path.txt" -<.> "ext" == "/directory/path.ext"
-- > "/directory/path.txt" -<.> ".ext" == "/directory/path.ext"
-- > "foo.o" -<.> "c" == "foo.c"
(-<.>) :: FilePath -> String -> FilePath
fp -<.> ext = replaceExtension fp ext
-- | Set the extension of a file, overwriting one if already present, equivalent to '-<.>'.
--
-- > replaceExtension "/directory/path.txt" "ext" == "/directory/path.ext"
-- > replaceExtension "/directory/path.txt" ".ext" == "/directory/path.ext"
-- > replaceExtension "file.txt" ".bob" == "file.bob"
-- > replaceExtension "file.txt" "bob" == "file.bob"
-- > replaceExtension "file" ".bob" == "file.bob"
-- > replaceExtension "file.txt" "" == "file"
-- > replaceExtension "file.fred.bob" "txt" == "file.fred.txt"
-- > replaceExtension x y == addExtension (dropExtension x) y
replaceExtension :: FilePath -> String -> FilePath
replaceExtension fp ext = addExtension (dropExtension fp) ext
-- | Add an extension, even if there is already one there, equivalent to 'addExtension'.
--
-- > "/directory/path" <.> "ext" == "/directory/path.ext"
-- > "/directory/path" <.> ".ext" == "/directory/path.ext"
(<.>) :: FilePath -> String -> FilePath
fp <.> ext = addExtension fp ext
-- | Remove last extension, and the \".\" preceding it.
--
-- > dropExtension "/directory/path.ext" == "/directory/path"
-- > dropExtension x == fst (splitExtension x)
dropExtension :: FilePath -> FilePath
dropExtension fp = fst (splitExtension fp)
-- | Add an extension, even if there is already one there, equivalent to '<.>'.
--
-- > addExtension "/directory/path" "ext" == "/directory/path.ext"
-- > addExtension "file.txt" "bib" == "file.txt.bib"
-- > addExtension "file." ".bib" == "file..bib"
-- > addExtension "file" ".bib" == "file.bib"
-- > addExtension "/" "x" == "/.x"
-- > addExtension x "" == x
-- > Valid x => takeFileName (addExtension (addTrailingPathSeparator x) "ext") == ".ext"
-- > Windows: addExtension "\\\\share" ".txt" == "\\\\share\\.txt"
addExtension :: FilePath -> String -> FilePath
addExtension file "" = file
addExtension file xs@(x:_) = joinDrive a res
where
res = if isExtSeparator x then b ++ xs
else b ++ [extSeparator] ++ xs
(a,b) = splitDrive file
-- | Does the given filename have an extension?
--
-- > hasExtension "/directory/path.ext" == True
-- > hasExtension "/directory/path" == False
-- > null (takeExtension x) == not (hasExtension x)
hasExtension :: FilePath -> Bool
hasExtension fp = any isExtSeparator (takeFileName fp)
-- | Drop the given extension from a FilePath, and the @\".\"@ preceding it.
-- Returns 'Nothing' if the FilePath does not have the given extension, or
-- 'Just' and the part before the extension if it does.
--
-- This function can be more predictable than 'dropExtensions', especially if the filename
-- might itself contain @.@ characters.
--
-- > stripExtension "hs.o" "foo.x.hs.o" == Just "foo.x"
-- > stripExtension "hi.o" "foo.x.hs.o" == Nothing
-- > dropExtension x == fromJust (stripExtension (takeExtension x) x)
-- > dropExtensions x == fromJust (stripExtension (takeExtensions x) x)
-- > stripExtension ".c.d" "a.b.c.d" == Just "a.b"
-- > stripExtension ".c.d" "a.b..c.d" == Just "a.b."
-- > stripExtension "baz" "foo.bar" == Nothing
-- > stripExtension "bar" "foobar" == Nothing
-- > stripExtension "" x == Just x
stripExtension :: String -> FilePath -> Maybe FilePath
-- The empty extension strips nothing and always succeeds.
stripExtension [] path = Just path
stripExtension ext@(x:_) path = stripSuffix dotExt path
    -- Prepend a '.' unless the caller already supplied one.
    where dotExt = if isExtSeparator x then ext else '.':ext
-- | Split on all extensions.
--
-- > splitExtensions "/directory/path.ext" == ("/directory/path",".ext")
-- > splitExtensions "file.tar.gz" == ("file",".tar.gz")
-- > uncurry (++) (splitExtensions x) == x
-- > Valid x => uncurry addExtension (splitExtensions x) == x
-- > splitExtensions "file.tar.gz" == ("file",".tar.gz")
splitExtensions :: FilePath -> (FilePath, String)
-- Break the file part at the *first* '.', so all extensions come off at once.
splitExtensions x = (a ++ c, d)
    where
        (a,b) = splitFileName_ x
        (c,d) = break isExtSeparator b
-- | Drop all extensions.
--
-- > dropExtensions "/directory/path.ext" == "/directory/path"
-- > dropExtensions "file.tar.gz" == "file"
-- > not $ hasExtension $ dropExtensions x
-- > not $ any isExtSeparator $ takeFileName $ dropExtensions x
dropExtensions :: FilePath -> FilePath
dropExtensions fp = fst (splitExtensions fp)
-- | Get all extensions.
--
-- > takeExtensions "/directory/path.ext" == ".ext"
-- > takeExtensions "file.tar.gz" == ".tar.gz"
takeExtensions :: FilePath -> String
takeExtensions fp = snd (splitExtensions fp)
-- | Replace all extensions of a file with a new extension. Note
-- that 'replaceExtension' and 'addExtension' both work for adding
-- multiple extensions, so only required when you need to drop
-- all extensions first.
--
-- > replaceExtensions "file.fred.bob" "txt" == "file.txt"
-- > replaceExtensions "file.fred.bob" "tar.gz" == "file.tar.gz"
replaceExtensions :: FilePath -> String -> FilePath
replaceExtensions fp ext = addExtension (dropExtensions fp) ext
---------------------------------------------------------------------
-- Drive methods
-- | Is the given character a valid drive letter?
-- only a-z and A-Z are letters, not isAlpha which is more unicodey
isLetter :: Char -> Bool
-- ASCII letters only; deliberately narrower than Data.Char.isAlpha.
isLetter c = isAsciiUpper c || isAsciiLower c
-- | Split a path into a drive and a path.
-- On Posix, \/ is a Drive.
--
-- > uncurry (++) (splitDrive x) == x
-- > Windows: splitDrive "file" == ("","file")
-- > Windows: splitDrive "c:/file" == ("c:/","file")
-- > Windows: splitDrive "c:\\file" == ("c:\\","file")
-- > Windows: splitDrive "\\\\shared\\test" == ("\\\\shared\\","test")
-- > Windows: splitDrive "\\\\shared" == ("\\\\shared","")
-- > Windows: splitDrive "\\\\?\\UNC\\shared\\file" == ("\\\\?\\UNC\\shared\\","file")
-- > Windows: splitDrive "\\\\?\\UNCshared\\file" == ("\\\\?\\","UNCshared\\file")
-- > Windows: splitDrive "\\\\?\\d:\\file" == ("\\\\?\\d:\\","file")
-- > Windows: splitDrive "/d" == ("","/d")
-- > Posix: splitDrive "/test" == ("/","test")
-- > Posix: splitDrive "//test" == ("//","test")
-- > Posix: splitDrive "test/file" == ("","test/file")
-- > Posix: splitDrive "file" == ("","file")
splitDrive :: FilePath -> (FilePath, FilePath)
-- On Posix the "drive" is simply the leading run of '/'.
splitDrive x | isPosix = span (== '/') x
-- Windows: try "c:\\" style, then "\\\\?\\" extended paths, then UNC shares.
splitDrive x | Just y <- readDriveLetter x = y
splitDrive x | Just y <- readDriveUNC x = y
splitDrive x | Just y <- readDriveShare x = y
splitDrive x = ("",x)
-- Move the leading run of separators from the path onto the drive.
addSlash :: FilePath -> FilePath -> (FilePath, FilePath)
addSlash drv rest = (drv ++ seps, remainder)
    where (seps, remainder) = span isPathSeparator rest
-- See [1].
-- "\\?\D:\<path>" or "\\?\UNC\<server>\<share>"
readDriveUNC :: FilePath -> Maybe (FilePath, FilePath)
-- Recognise "\\?\D:\<path>" and "\\?\UNC\<server>\<share>" prefixes.
readDriveUNC (s1:s2:'?':s3:xs) | all isPathSeparator [s1,s2,s3] =
    case map toUpper xs of
        -- "\\?\UNC\server\share\..." — case-insensitive match on "UNC",
        -- but the original spelling (take 4 xs) is preserved in the result.
        ('U':'N':'C':s4:_) | isPathSeparator s4 ->
            let (a,b) = readDriveShareName (drop 4 xs)
            in Just (s1:s2:'?':s3:take 4 xs ++ a, b)
        _ -> case readDriveLetter xs of
                 -- Extended-length path.
                 Just (a,b) -> Just (s1:s2:'?':s3:a,b)
                 Nothing -> Nothing
readDriveUNC _ = Nothing
{- c:\ -}
readDriveLetter :: String -> Maybe (FilePath, FilePath)
readDriveLetter (x:':':y:xs) | isLetter x && isPathSeparator y = Just $ addSlash [x,':'] (y:xs)
readDriveLetter (x:':':xs) | isLetter x = Just ([x,':'], xs)
readDriveLetter _ = Nothing
{- \\sharename\ -}
readDriveShare :: String -> Maybe (FilePath, FilePath)
readDriveShare (s1:s2:xs) | isPathSeparator s1 && isPathSeparator s2 =
Just (s1:s2:a,b)
where (a,b) = readDriveShareName xs
readDriveShare _ = Nothing
{- assume you have already seen \\ -}
{- share\bob -> "share\", "bob" -}
readDriveShareName :: String -> (FilePath, FilePath)
-- Read up to the first separator; the separator run stays with the drive.
readDriveShareName name = addSlash a b
    where (a,b) = break isPathSeparator name
-- | Join a drive and the rest of the path.
--
-- > Valid x => uncurry joinDrive (splitDrive x) == x
-- > Windows: joinDrive "C:" "foo" == "C:foo"
-- > Windows: joinDrive "C:\\" "bar" == "C:\\bar"
-- > Windows: joinDrive "\\\\share" "foo" == "\\\\share\\foo"
-- > Windows: joinDrive "/:" "foo" == "/:\\foo"
joinDrive :: FilePath -> FilePath -> FilePath
joinDrive drv pth = combineAlways drv pth
-- | Get the drive from a filepath.
--
-- > takeDrive x == fst (splitDrive x)
takeDrive :: FilePath -> FilePath
takeDrive fp = fst (splitDrive fp)
-- | Delete the drive, if it exists.
--
-- > dropDrive x == snd (splitDrive x)
dropDrive :: FilePath -> FilePath
dropDrive fp = snd (splitDrive fp)
-- | Does a path have a drive.
--
-- > not (hasDrive x) == null (takeDrive x)
-- > Posix: hasDrive "/foo" == True
-- > Windows: hasDrive "C:\\foo" == True
-- > Windows: hasDrive "C:foo" == True
-- > hasDrive "foo" == False
-- > hasDrive "" == False
hasDrive :: FilePath -> Bool
hasDrive fp = not (null (takeDrive fp))
-- | Is an element a drive
--
-- > Posix: isDrive "/" == True
-- > Posix: isDrive "/foo" == False
-- > Windows: isDrive "C:\\" == True
-- > Windows: isDrive "C:\\foo" == False
-- > isDrive "" == False
isDrive :: FilePath -> Bool
isDrive x = not (null x) && null (dropDrive x)
---------------------------------------------------------------------
-- Operations on a filepath, as a list of directories
-- | Split a filename into directory and file. '</>' is the inverse.
-- The first component will often end with a trailing slash.
--
-- > splitFileName "/directory/file.ext" == ("/directory/","file.ext")
-- > Valid x => uncurry (</>) (splitFileName x) == x || fst (splitFileName x) == "./"
-- > Valid x => isValid (fst (splitFileName x))
-- > splitFileName "file/bob.txt" == ("file/", "bob.txt")
-- > splitFileName "file/" == ("file/", "")
-- > splitFileName "bob" == ("./", "bob")
-- > Posix: splitFileName "/" == ("/","")
-- > Windows: splitFileName "c:" == ("c:","")
splitFileName :: FilePath -> (String, String)
-- A path with no directory component gets "./" so that (</>) re-joins it.
splitFileName fp
    | null dir  = ("./", name)
    | otherwise = (dir, name)
    where (dir, name) = splitFileName_ fp
-- version of splitFileName where, if the FilePath has no directory
-- component, the returned directory is "" rather than "./". This
-- is used in cases where we are going to combine the returned
-- directory to make a valid FilePath, and having a "./" appear would
-- look strange and upset simple equality properties. See
-- e.g. replaceFileName.
splitFileName_ :: FilePath -> (String, String)
splitFileName_ x = (drv ++ dir, file)
    where
        -- Split the drive off first so a separator inside the drive
        -- (e.g. "\\\\share\\") cannot be mistaken for the dir/file boundary.
        (drv,pth) = splitDrive x
        (dir,file) = breakEnd isPathSeparator pth
-- | Set the filename.
--
-- > replaceFileName "/directory/other.txt" "file.ext" == "/directory/file.ext"
-- > Valid x => replaceFileName x (takeFileName x) == x
replaceFileName :: FilePath -> String -> FilePath
replaceFileName x y = a </> y where (a,_) = splitFileName_ x
-- | Drop the filename. Unlike 'takeDirectory', this function will leave
-- a trailing path separator on the directory.
--
-- > dropFileName "/directory/file.ext" == "/directory/"
-- > dropFileName x == fst (splitFileName x)
dropFileName :: FilePath -> FilePath
dropFileName fp = fst (splitFileName fp)
-- | Get the file name.
--
-- > takeFileName "/directory/file.ext" == "file.ext"
-- > takeFileName "test/" == ""
-- > takeFileName x `isSuffixOf` x
-- > takeFileName x == snd (splitFileName x)
-- > Valid x => takeFileName (replaceFileName x "fred") == "fred"
-- > Valid x => takeFileName (x </> "fred") == "fred"
-- > Valid x => isRelative (takeFileName x)
takeFileName :: FilePath -> FilePath
takeFileName fp = snd (splitFileName fp)
-- | Get the base name, without an extension or path.
--
-- > takeBaseName "/directory/file.ext" == "file"
-- > takeBaseName "file/test.txt" == "test"
-- > takeBaseName "dave.ext" == "dave"
-- > takeBaseName "" == ""
-- > takeBaseName "test" == "test"
-- > takeBaseName (addTrailingPathSeparator x) == ""
-- > takeBaseName "file/file.tar.gz" == "file.tar"
takeBaseName :: FilePath -> String
takeBaseName fp = dropExtension (takeFileName fp)
-- | Set the base name.
--
-- > replaceBaseName "/directory/other.ext" "file" == "/directory/file.ext"
-- > replaceBaseName "file/test.txt" "bob" == "file/bob.txt"
-- > replaceBaseName "fred" "bill" == "bill"
-- > replaceBaseName "/dave/fred/bob.gz.tar" "new" == "/dave/fred/new.tar"
-- > Valid x => replaceBaseName x (takeBaseName x) == x
replaceBaseName :: FilePath -> String -> FilePath
replaceBaseName pth nam = combineAlways a (nam <.> ext)
    where
        (a,b) = splitFileName_ pth
        -- Only the last extension of the old name is kept.
        ext = takeExtension b
-- | Is an item either a directory or the last character a path separator?
--
-- > hasTrailingPathSeparator "test" == False
-- > hasTrailingPathSeparator "test/" == True
hasTrailingPathSeparator :: FilePath -> Bool
hasTrailingPathSeparator fp = not (null fp) && isPathSeparator (last fp)
hasLeadingPathSeparator :: FilePath -> Bool
hasLeadingPathSeparator fp = not (null fp) && isPathSeparator (head fp)
-- | Add a trailing file path separator if one is not already present.
--
-- > hasTrailingPathSeparator (addTrailingPathSeparator x)
-- > hasTrailingPathSeparator x ==> addTrailingPathSeparator x == x
-- > Posix: addTrailingPathSeparator "test/rest" == "test/rest/"
addTrailingPathSeparator :: FilePath -> FilePath
addTrailingPathSeparator fp
    | hasTrailingPathSeparator fp = fp
    | otherwise = fp ++ [pathSeparator]
-- | Remove any trailing path separators
--
-- > dropTrailingPathSeparator "file/test/" == "file/test"
-- > dropTrailingPathSeparator "/" == "/"
-- > Windows: dropTrailingPathSeparator "\\" == "\\"
-- > Posix: not (hasTrailingPathSeparator (dropTrailingPathSeparator x)) || isDrive x
dropTrailingPathSeparator :: FilePath -> FilePath
dropTrailingPathSeparator x =
    if hasTrailingPathSeparator x && not (isDrive x)
    -- Drop every trailing separator, but keep one when the result would
    -- otherwise be empty (e.g. "\\" on Windows, "/" on Posix).
    then let x' = dropWhileEnd isPathSeparator x
         in if null x' then [last x] else x'
    else x
-- | Get the directory name, move up one level.
--
-- > takeDirectory "/directory/other.ext" == "/directory"
-- > takeDirectory x `isPrefixOf` x || takeDirectory x == "."
-- > takeDirectory "foo" == "."
-- > takeDirectory "/" == "/"
-- > takeDirectory "/foo" == "/"
-- > takeDirectory "/foo/bar/baz" == "/foo/bar"
-- > takeDirectory "/foo/bar/baz/" == "/foo/bar/baz"
-- > takeDirectory "foo/bar/baz" == "foo/bar"
-- > Windows: takeDirectory "foo\\bar" == "foo"
-- > Windows: takeDirectory "foo\\bar\\\\" == "foo\\bar"
-- > Windows: takeDirectory "C:\\" == "C:\\"
takeDirectory :: FilePath -> FilePath
takeDirectory fp = dropTrailingPathSeparator (dropFileName fp)
-- | Set the directory, keeping the filename the same.
--
-- > replaceDirectory "root/file.ext" "/directory/" == "/directory/file.ext"
-- > Valid x => replaceDirectory x (takeDirectory x) `equalFilePath` x
replaceDirectory :: FilePath -> String -> FilePath
replaceDirectory fp newDir = combineAlways newDir name
    where name = takeFileName fp
-- | An alias for '</>'.
combine :: FilePath -> FilePath -> FilePath
-- An absolute or drive-qualified right-hand side wins outright.
combine a b =
    if hasLeadingPathSeparator b || hasDrive b then b else combineAlways a b
-- | Combine two paths, assuming rhs is NOT absolute.
combineAlways :: FilePath -> FilePath -> FilePath
combineAlways a b | null a = b
                  | null b = a
                  | hasTrailingPathSeparator a = a ++ b
                  | otherwise = case a of
                      -- "C:" is drive-relative: "C:" + "foo" must give
                      -- "C:foo", never "C:\\foo".
                      [a1,':'] | isWindows && isLetter a1 -> a ++ b
                      _ -> a ++ [pathSeparator] ++ b
-- | Combine two paths with a path separator.
-- If the second path starts with a path separator or a drive letter, then it returns the second.
-- The intention is that @readFile (dir '</>' file)@ will access the same file as
-- @setCurrentDirectory dir; readFile file@.
--
-- > Posix: "/directory" </> "file.ext" == "/directory/file.ext"
-- > Windows: "/directory" </> "file.ext" == "/directory\\file.ext"
-- > "directory" </> "/file.ext" == "/file.ext"
-- > Valid x => (takeDirectory x </> takeFileName x) `equalFilePath` x
--
-- Combined:
--
-- > Posix: "/" </> "test" == "/test"
-- > Posix: "home" </> "bob" == "home/bob"
-- > Posix: "x:" </> "foo" == "x:/foo"
-- > Windows: "C:\\foo" </> "bar" == "C:\\foo\\bar"
-- > Windows: "home" </> "bob" == "home\\bob"
--
-- Not combined:
--
-- > Posix: "home" </> "/bob" == "/bob"
-- > Windows: "home" </> "C:\\bob" == "C:\\bob"
--
-- Not combined (tricky):
--
-- On Windows, if a filepath starts with a single slash, it is relative to the
-- root of the current drive. In [1], this is (confusingly) referred to as an
-- absolute path.
-- The current behavior of '</>' is to never combine these forms.
--
-- > Windows: "home" </> "/bob" == "/bob"
-- > Windows: "home" </> "\\bob" == "\\bob"
-- > Windows: "C:\\home" </> "\\bob" == "\\bob"
--
-- On Windows, from [1]: "If a file name begins with only a disk designator
-- but not the backslash after the colon, it is interpreted as a relative path
-- to the current directory on the drive with the specified letter."
-- The current behavior of '</>' is to never combine these forms.
--
-- > Windows: "D:\\foo" </> "C:bar" == "C:bar"
-- > Windows: "C:\\foo" </> "C:bar" == "C:bar"
(</>) :: FilePath -> FilePath -> FilePath
a </> b = combine a b
-- | Split a path by the directory separator.
--
-- > splitPath "/directory/file.ext" == ["/","directory/","file.ext"]
-- > concat (splitPath x) == x
-- > splitPath "test//item/" == ["test//","item/"]
-- > splitPath "test/item/file" == ["test/","item/","file"]
-- > splitPath "" == []
-- > Windows: splitPath "c:\\test\\path" == ["c:\\","test\\","path"]
-- > Posix: splitPath "/file/test" == ["/","file/","test"]
splitPath :: FilePath -> [FilePath]
-- The drive (when present) is the first component; every later component
-- keeps its trailing run of separators, so 'concat' restores the input.
splitPath x = [drive | drive /= ""] ++ f path
    where
        (drive,path) = splitDrive x
        f "" = []
        f y = (a++c) : f d
            where
                (a,b) = break isPathSeparator y
                (c,d) = span isPathSeparator b
-- | Just as 'splitPath', but don't add the trailing slashes to each element.
--
-- > splitDirectories "/directory/file.ext" == ["/","directory","file.ext"]
-- > splitDirectories "test/file" == ["test","file"]
-- > splitDirectories "/test/file" == ["/","test","file"]
-- > Windows: splitDirectories "C:\\test\\file" == ["C:\\", "test", "file"]
-- > Valid x => joinPath (splitDirectories x) `equalFilePath` x
-- > splitDirectories "" == []
-- > Windows: splitDirectories "C:\\test\\\\\\file" == ["C:\\", "test", "file"]
-- > splitDirectories "/test///file" == ["/","test","file"]
splitDirectories :: FilePath -> [FilePath]
splitDirectories fp = map dropTrailingPathSeparator (splitPath fp)
-- | Join path elements back together.
--
-- > joinPath ["/","directory/","file.ext"] == "/directory/file.ext"
-- > Valid x => joinPath (splitPath x) == x
-- > joinPath [] == ""
-- > Posix: joinPath ["test","file","path"] == "test/file/path"
joinPath :: [FilePath] -> FilePath
-- Note that this definition on c:\\c:\\, join then split will give c:\\.
joinPath parts = foldr combine "" parts
---------------------------------------------------------------------
-- File name manipulators
-- | Equality of two 'FilePath's.
-- If you call @System.Directory.canonicalizePath@
-- first this has a much better chance of working.
-- Note that this doesn't follow symlinks or DOSNAM~1s.
--
-- > x == y ==> equalFilePath x y
-- > normalise x == normalise y ==> equalFilePath x y
-- > equalFilePath "foo" "foo/"
-- > not (equalFilePath "foo" "/foo")
-- > Posix: not (equalFilePath "foo" "FOO")
-- > Windows: equalFilePath "foo" "FOO"
-- > Windows: not (equalFilePath "C:" "C:/")
equalFilePath :: FilePath -> FilePath -> Bool
equalFilePath a b = f a == f b
where
f x | isWindows = dropTrailingPathSeparator $ map toLower $ normalise x
| otherwise = dropTrailingPathSeparator $ normalise x
-- | Contract a filename, based on a relative path. Note that the resulting path
-- will never introduce @..@ paths, as the presence of symlinks means @..\/b@
-- may not reach @a\/b@ if it starts from @a\/c@. For a worked example see
-- <http://neilmitchell.blogspot.co.uk/2015/10/filepaths-are-subtle-symlinks-are-hard.html this blog post>.
--
-- The corresponding @makeAbsolute@ function can be found in
-- @System.Directory@.
--
-- > makeRelative "/directory" "/directory/file.ext" == "file.ext"
-- > Valid x => makeRelative (takeDirectory x) x `equalFilePath` takeFileName x
-- > makeRelative x x == "."
-- > Valid x y => equalFilePath x y || (isRelative x && makeRelative y x == x) || equalFilePath (y </> makeRelative y x) x
-- > Windows: makeRelative "C:\\Home" "c:\\home\\bob" == "bob"
-- > Windows: makeRelative "C:\\Home" "c:/home/bob" == "bob"
-- > Windows: makeRelative "C:\\Home" "D:\\Home\\Bob" == "D:\\Home\\Bob"
-- > Windows: makeRelative "C:\\Home" "C:Home\\Bob" == "C:Home\\Bob"
-- > Windows: makeRelative "/Home" "/home/bob" == "bob"
-- > Windows: makeRelative "/" "//" == "//"
-- > Posix: makeRelative "/Home" "/home/bob" == "/home/bob"
-- > Posix: makeRelative "/home/" "/home/bob/foo/bar" == "bob/foo/bar"
-- > Posix: makeRelative "/fred" "bob" == "bob"
-- > Posix: makeRelative "/file/test" "/file/test/fred" == "fred"
-- > Posix: makeRelative "/file/test" "/file/test/fred/" == "fred/"
-- > Posix: makeRelative "some/path" "some/path/a/b/c" == "a/b/c"
makeRelative :: FilePath -> FilePath -> FilePath
makeRelative root path
    | equalFilePath root path = "."
    -- Different drives / absoluteness: the path cannot be made relative.
    | takeAbs root /= takeAbs path = path
    | otherwise = f (dropAbs root) (dropAbs path)
    where
        -- Walk both paths component by component while they agree; on the
        -- first mismatch, give back the original path unchanged.
        f "" y = dropWhile isPathSeparator y
        f x y = let (x1,x2) = g x
                    (y1,y2) = g y
                in if equalFilePath x1 y1 then f x2 y2 else path
        -- Split off the first path component (separators stripped).
        g x = (dropWhile isPathSeparator a, dropWhile isPathSeparator b)
            where (a,b) = break isPathSeparator $ dropWhile isPathSeparator x
        -- on windows, need to drop '/' which is kind of absolute, but not a drive
        dropAbs x | hasLeadingPathSeparator x && not (hasDrive x) = tail x
        dropAbs x = dropDrive x
        takeAbs x | hasLeadingPathSeparator x && not (hasDrive x) = [pathSeparator]
        takeAbs x = map (\y -> if isPathSeparator y then pathSeparator else toLower y) $ takeDrive x
-- | Normalise a file
--
-- * \/\/ outside of the drive can be made blank
--
-- * \/ -> 'pathSeparator'
--
-- * .\/ -> \"\"
--
-- > Posix: normalise "/file/\\test////" == "/file/\\test/"
-- > Posix: normalise "/file/./test" == "/file/test"
-- > Posix: normalise "/test/file/../bob/fred/" == "/test/file/../bob/fred/"
-- > Posix: normalise "../bob/fred/" == "../bob/fred/"
-- > Posix: normalise "./bob/fred/" == "bob/fred/"
-- > Windows: normalise "c:\\file/bob\\" == "C:\\file\\bob\\"
-- > Windows: normalise "c:\\" == "C:\\"
-- > Windows: normalise "C:.\\" == "C:"
-- > Windows: normalise "\\\\server\\test" == "\\\\server\\test"
-- > Windows: normalise "//server/test" == "\\\\server\\test"
-- > Windows: normalise "c:/file" == "C:\\file"
-- > Windows: normalise "/file" == "\\file"
-- > Windows: normalise "\\" == "\\"
-- > Windows: normalise "/./" == "\\"
-- > normalise "." == "."
-- > Posix: normalise "./" == "./"
-- > Posix: normalise "./." == "./"
-- > Posix: normalise "/./" == "/"
-- > Posix: normalise "/" == "/"
-- > Posix: normalise "bob/fred/." == "bob/fred/"
-- > Posix: normalise "//home" == "/home"
normalise :: FilePath -> FilePath
normalise path = result ++ [pathSeparator | addPathSeparator]
    where
        -- Drive and remainder are normalised independently, then rejoined.
        (drv,pth) = splitDrive path
        result = joinDrive' (normaliseDrive drv) (f pth)
        -- If everything normalised away (e.g. "./"), the answer is ".".
        joinDrive' "" "" = "."
        joinDrive' d p = joinDrive d p
        -- Restore a single trailing separator when the input denoted a
        -- directory but the normalised result lost the separator.
        addPathSeparator = isDirPath pth
            && not (hasTrailingPathSeparator result)
            && not (isRelativeDrive drv)
        -- "dir/" and "dir/." both denote directories.
        isDirPath xs = hasTrailingPathSeparator xs
            || not (null xs) && last xs == '.' && hasTrailingPathSeparator (init xs)
        -- Canonicalise separators, then drop "." components.
        f = joinPath . dropDots . propSep . splitDirectories
        -- A leading run of separators collapses to one canonical separator.
        propSep (x:xs) | all isPathSeparator x = [pathSeparator] : xs
                       | otherwise = x : xs
        propSep [] = []
        dropDots = filter ("." /=)
-- | Normalise the drive prefix of a path: on Posix any non-empty drive is a
-- single 'pathSeparator'; on Windows the separators are canonicalised and a
-- plain drive letter (e.g. @c:\\@) is upper-cased.
normaliseDrive :: FilePath -> FilePath
normaliseDrive "" = ""
normaliseDrive drive
    | isPosix = [pathSeparator]
    | isJust (readDriveLetter canonical) = map toUpper canonical
    | otherwise = canonical
    where
        -- Replace every separator variant with the canonical one.
        canonical = map (\c -> if isPathSeparator c then pathSeparator else c) drive
-- Information for validity functions on Windows. See [1].

-- | Characters that may not appear in a Windows path element: control
-- characters (codes 0 to 31) and the reserved punctuation @:*?><|\"@.
--
-- Note: the previous lower-bound test @x >= '\0'@ was redundant — '\0' is
-- the smallest 'Char', so it held for every input — and has been removed.
isBadCharacter :: Char -> Bool
isBadCharacter x = x <= '\31' || x `elem` ":*?><|\""
-- | Path elements reserved as device names on Windows: a handful of fixed
-- names plus the numbered COM and LPT ports 1 through 9.
badElements :: [FilePath]
badElements =
    ["CON","PRN","AUX","NUL","CLOCK$"]
    ++ [port ++ show n | port <- ["COM","LPT"], n <- [1 .. 9 :: Int]]
-- | Is a FilePath valid, i.e. could you create a file like it? This function checks for invalid names,
-- and invalid characters, but does not check if length limits are exceeded, as these are typically
-- filesystem dependent.
--
-- > isValid "" == False
-- > isValid "\0" == False
-- > Posix: isValid "/random_ path:*" == True
-- > Posix: isValid x == not (null x)
-- > Windows: isValid "c:\\test" == True
-- > Windows: isValid "c:\\test:of_test" == False
-- > Windows: isValid "test*" == False
-- > Windows: isValid "c:\\test\\nul" == False
-- > Windows: isValid "c:\\test\\prn.txt" == False
-- > Windows: isValid "c:\\nul\\file" == False
-- > Windows: isValid "\\\\" == False
-- > Windows: isValid "\\\\\\foo" == False
-- > Windows: isValid "\\\\?\\D:file" == False
-- > Windows: isValid "foo\tbar" == False
-- > Windows: isValid "nul .txt" == False
-- > Windows: isValid " nul.txt" == True
isValid :: FilePath -> Bool
isValid "" = False
isValid x | '\0' `elem` x = False
isValid _ | isPosix = True
isValid path = and
    [ not (any isBadCharacter rest)
    , not (any isBadElement (splitDirectories rest))
    , not (isJust (readDriveShare drv) && all isPathSeparator drv)
    , not (isJust (readDriveUNC drv) && not (hasTrailingPathSeparator drv))
    ]
    where
        (drv, rest) = splitDrive path
        -- A path element is reserved if, ignoring extensions and trailing
        -- spaces, it case-insensitively names a Windows device.
        isBadElement e = map toUpper (dropWhileEnd (== ' ') (dropExtensions e)) `elem` badElements
-- | Take a FilePath and make it valid; does not change already valid FilePaths.
--
-- > isValid (makeValid x)
-- > isValid x ==> makeValid x == x
-- > makeValid "" == "_"
-- > makeValid "file\0name" == "file_name"
-- > Windows: makeValid "c:\\already\\/valid" == "c:\\already\\/valid"
-- > Windows: makeValid "c:\\test:of_test" == "c:\\test_of_test"
-- > Windows: makeValid "test*" == "test_"
-- > Windows: makeValid "c:\\test\\nul" == "c:\\test\\nul_"
-- > Windows: makeValid "c:\\test\\prn.txt" == "c:\\test\\prn_.txt"
-- > Windows: makeValid "c:\\test/prn.txt" == "c:\\test/prn_.txt"
-- > Windows: makeValid "c:\\nul\\file" == "c:\\nul_\\file"
-- > Windows: makeValid "\\\\\\foo" == "\\\\drive"
-- > Windows: makeValid "\\\\?\\D:file" == "\\\\?\\D:\\file"
-- > Windows: makeValid "nul .txt" == "nul _.txt"
makeValid :: FilePath -> FilePath
makeValid "" = "_"
makeValid path
        -- On Posix only NUL bytes are invalid; replace them with '_'.
        | isPosix = map (\x -> if x == '\0' then '_' else x) path
        -- A share made only of separators gets a placeholder share name.
        | isJust (readDriveShare drv) && all isPathSeparator drv = take 2 drv ++ "drive"
        -- A UNC drive without a trailing separator: add one and retry.
        | isJust (readDriveUNC drv) && not (hasTrailingPathSeparator drv) =
            makeValid (drv ++ [pathSeparator] ++ pth)
        -- Otherwise sanitise characters first, then reserved element names.
        | otherwise = joinDrive drv $ validElements $ validChars pth
    where
        (drv,pth) = splitDrive path
        -- Replace each invalid character with '_'.
        validChars = map f
        f x = if isBadCharacter x then '_' else x
        -- Rename reserved device names (per element, keeping separators).
        validElements x = joinPath $ map g $ splitPath x
        g x = h a ++ b
            where (a,b) = break isPathSeparator x
        -- Append '_' to the base name of a reserved element, keeping its
        -- extensions (e.g. "prn.txt" -> "prn_.txt").
        h x = if map toUpper (dropWhileEnd (== ' ') a) `elem` badElements then a ++ "_" <.> b else x
            where (a,b) = splitExtensions x
-- | Is a path relative, or is it fixed to the root?
--
-- > Windows: isRelative "path\\test" == True
-- > Windows: isRelative "c:\\test" == False
-- > Windows: isRelative "c:test" == True
-- > Windows: isRelative "c:\\" == False
-- > Windows: isRelative "c:/" == False
-- > Windows: isRelative "c:" == True
-- > Windows: isRelative "\\\\foo" == False
-- > Windows: isRelative "\\\\?\\foo" == False
-- > Windows: isRelative "\\\\?\\UNC\\foo" == False
-- > Windows: isRelative "/foo" == True
-- > Windows: isRelative "\\foo" == True
-- > Posix: isRelative "test/path" == True
-- > Posix: isRelative "/test" == False
-- > Posix: isRelative "/" == False
--
-- According to [1]:
--
-- * "A UNC name of any format [is never relative]."
--
-- * "You cannot use the "\\?\" prefix with a relative path."
isRelative :: FilePath -> Bool
-- No drive at all means relative; a bare drive letter ("c:") is relative
-- to that drive's current directory (see 'isRelativeDrive').
isRelative x = null drive || isRelativeDrive drive
    where drive = takeDrive x
{- c:foo -}
-- From [1]: "If a file name begins with only a disk designator but not the
-- backslash after the colon, it is interpreted as a relative path to the
-- current directory on the drive with the specified letter."
isRelativeDrive :: String -> Bool
isRelativeDrive x =
    case readDriveLetter x of
        Just (dr, _) -> not (hasTrailingPathSeparator dr)
        Nothing      -> False
-- | The negation of 'isRelative'.
--
-- > isAbsolute x == not (isRelative x)
isAbsolute :: FilePath -> Bool
isAbsolute x = not (isRelative x)
-----------------------------------------------------------------------------
-- dropWhileEnd (>2) [1,2,3,4,1,2,3,4] == [1,2,3,4,1,2]
-- Note that Data.List.dropWhileEnd is only available in base >= 4.5,
-- hence this local definition (written the same way as in base).
dropWhileEnd :: (a -> Bool) -> [a] -> [a]
dropWhileEnd p = foldr (\x acc -> if p x && null acc then [] else x : acc) []
-- takeWhileEnd (>2) [1,2,3,4,1,2,3,4] == [3,4]
-- Keep the longest suffix whose elements all satisfy the predicate.
takeWhileEnd :: (a -> Bool) -> [a] -> [a]
takeWhileEnd p xs = reverse (takeWhile p (reverse xs))
-- spanEnd (>2) [1,2,3,4,1,2,3,4] == ([1,2,3,4,1,2], [3,4])
-- Split off the longest satisfying suffix with a single traversal of the
-- reversed list: span takes the suffix, the remainder is the prefix.
spanEnd :: (a -> Bool) -> [a] -> ([a], [a])
spanEnd p xs = (reverse remainder, reverse suffix)
    where (suffix, remainder) = span p (reverse xs)
-- breakEnd (< 2) [1,2,3,4,1,2,3,4] == ([1,2,3,4,1],[2,3,4])
-- Split off the longest suffix of elements NOT satisfying the predicate,
-- computed directly with break over the reversed list.
breakEnd :: (a -> Bool) -> [a] -> ([a], [a])
breakEnd p xs = (reverse remainder, reverse suffix)
    where (suffix, remainder) = break p (reverse xs)
-- | Drop the given suffix from a list: 'Just' the remaining prefix when the
-- list ends with the suffix, 'Nothing' otherwise.
stripSuffix :: Eq a => [a] -> [a] -> Maybe [a]
stripSuffix suffix list =
    case stripPrefix (reverse suffix) (reverse list) of
        Just remainder -> Just (reverse remainder)
        Nothing        -> Nothing
| phischu/fragnix | tests/packages/scotty/System.FilePath.Windows.hs | bsd-3-clause | 38,649 | 0 | 18 | 7,510 | 5,160 | 2,971 | 2,189 | 303 | 6 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Data.Streaming.ByteString.BuilderSpec
( spec
, builderSpec
, BuilderFunctions(..)
) where
import qualified Data.ByteString as S
import Data.ByteString.Char8 ()
import qualified Data.ByteString.Unsafe as S
import qualified Data.ByteString.Builder as B
import qualified Data.ByteString.Builder.Internal as B
import qualified Data.ByteString.Lazy as L
import Data.ByteString.Lazy.Char8 ()
import Data.IORef
import Data.Maybe
import Data.Monoid
import Test.Hspec
import Test.Hspec.QuickCheck (prop)
import Data.Streaming.ByteString.Builder
import Data.Streaming.ByteString.Builder.Class
-- | A dictionary of builder operations, so that 'builderSpec' can be run
-- against different builder implementations (see 'spec' for the instance
-- built from "Data.ByteString.Builder").
data BuilderFunctions b = BuilderFunctions
    { bfFromByteString :: S.ByteString -> b
      -- ^ Build from a strict ByteString.
    , bfInsertLazyByteString :: L.ByteString -> b
      -- ^ Build by inserting a lazy ByteString.
    , bfToLazyByteString :: b -> L.ByteString
      -- ^ Run the builder directly; used as the reference result in specs.
    , bfInsertByteString :: S.ByteString -> b
      -- ^ Build by inserting a strict ByteString.
    , bfCopyByteString :: S.ByteString -> b
      -- ^ Build by copying a strict ByteString.
    }
-- | Run a list of builders through 'newBuilderRecv' with the given buffer
-- allocation strategy and collect every non-empty output chunk, followed by
-- whatever the final 'finish' action yields.
tester :: StreamingBuilder b => BufferAllocStrategy -> [b] -> IO [S.ByteString]
tester strat builders0 = do
    (recv, finish) <- newBuilderRecv strat
    -- 'front' is a difference list accumulating the output chunks.
    let loop front [] = do
            -- No builders left: flush any bytes still sitting in the buffer.
            mbs <- finish
            return $ front $ maybe [] return mbs
        loop front0 (bu:bus) = do
            popper <- recv bu
            let go front = do
                    bs <- popper
                    -- An empty chunk signals that this builder is exhausted.
                    if S.null bs
                        then loop front bus
                        else go (front . (bs:))
            go front0
    loop id builders0
-- | Like 'tester', but a 'Nothing' in the input denotes an explicit flush
-- ('builderFlush'), which appears as a 'Nothing' marker in the output;
-- chunks of data appear wrapped in 'Just'.
testerFlush :: StreamingBuilder b
            => BufferAllocStrategy -> [Maybe b] -> IO [Maybe S.ByteString]
testerFlush strat builders0 = do
    (recv, finish) <- newBuilderRecv strat
    -- 'front' is a difference list accumulating the output.
    let loop front [] = do
            mbs <- finish
            return $ front $ maybe [] (return . Just) mbs
        loop front0 (mbu:bus) = do
            -- Nothing is turned into an actual flush builder here.
            popper <- recv $ fromMaybe builderFlush mbu
            let go front = do
                    bs <- popper
                    if S.null bs
                        then
                            -- Exhausted: record a Nothing marker only if this
                            -- input element was itself a flush.
                            case mbu of
                                Nothing -> loop (front . (Nothing:)) bus
                                Just _ -> loop front bus
                        else go (front . (Just bs:))
            go front0
    loop id builders0
-- | Specs that any 'StreamingBuilder' implementation should satisfy, run
-- against the operations supplied in 'BuilderFunctions'. The reference
-- semantics in each case is 'bfToLazyByteString' of the same builders.
builderSpec :: forall b. StreamingBuilder b => BuilderFunctions b -> Spec
builderSpec BuilderFunctions{..} = do
    prop "idempotent to toLazyByteString" $ \bss' -> do
        let bss = map S.pack bss'
        let builders :: [b]
            builders = map bfFromByteString bss
        let lbs = bfToLazyByteString $ mconcat builders
        outBss <- tester defaultStrategy builders
        L.fromChunks outBss `shouldBe` lbs
    it "works for large input" $ do
        let builders :: [b]
            builders = replicate 10000 (bfFromByteString "hello world!" :: b)
        let lbs = bfToLazyByteString $ mconcat builders
        outBss <- tester defaultStrategy builders
        L.fromChunks outBss `shouldBe` lbs
    it "works for lazy bytestring insertion" $ do
        let builders :: [b]
            builders = replicate 10000 (bfInsertLazyByteString "hello world!")
        let lbs = bfToLazyByteString $ mconcat builders
        outBss <- tester defaultStrategy builders
        L.fromChunks outBss `shouldBe` lbs
    prop "works for strict bytestring insertion" $ \bs' -> do
        let bs = S.pack bs'
        -- Mix copied and inserted chunks in the same builder.
        let builders :: [b]
            builders = replicate 10000 (bfCopyByteString bs `mappend` bfInsertByteString bs)
        let lbs = bfToLazyByteString $ mconcat builders
        outBss <- tester defaultStrategy builders
        L.fromChunks outBss `shouldBe` lbs
    it "flush shouldn't bring in empty strings." $ do
        let dat = ["hello", "world"]
            builders :: [b]
            builders = map ((`mappend` builderFlush) . bfFromByteString) dat
        out <- tester defaultStrategy builders
        dat `shouldBe` out
    prop "flushing" $ \bss' -> do
        -- Interleave an explicit flush (Nothing) after every chunk; the
        -- output must reproduce exactly the same Just/Nothing sequence.
        let bss = concatMap (\bs -> [Just $ S.pack bs, Nothing]) $ filter (not . null) bss'
        let builders :: [Maybe b]
            builders = map (fmap bfFromByteString) bss
        outBss <- testerFlush defaultStrategy builders
        outBss `shouldBe` bss
    it "large flush input" $ do
        let lbs = L.pack $ concat $ replicate 100000 [0..255]
            chunks :: [Maybe b]
            chunks = map (Just . bfFromByteString) (L.toChunks lbs)
        bss <- testerFlush defaultStrategy chunks
        L.fromChunks (catMaybes bss) `shouldBe` lbs
-- | Top-level spec: instantiates 'builderSpec' with the builder operations
-- from "Data.ByteString.Builder", plus a 'toByteStringIOWith' property.
spec :: Spec
spec =
    describe "Data.Streaming.ByteString.Builder" $ do
        builderSpec BuilderFunctions
            { bfFromByteString = B.byteString
            , bfInsertLazyByteString = B.lazyByteStringInsert
            , bfToLazyByteString = B.toLazyByteString
            , bfInsertByteString = B.byteStringInsert
            , bfCopyByteString = B.byteStringCopy
            }
        prop "toByteStringIO idempotent to toLazyByteString" $ \bss' -> do
            let bss = mconcat (map (B.byteString . S.pack) bss')
            ior <- newIORef []
            -- Chunks are accumulated in reverse via an IORef; each chunk is
            -- repacked through useAsCStringLen before being stored —
            -- NOTE(review): presumably to avoid retaining the callback's
            -- buffer; confirm against toByteStringIOWith's contract.
            toByteStringIOWith 16
                (\s -> do s' <- S.useAsCStringLen s S.unsafePackCStringLen
                          modifyIORef ior (s' :))
                bss
            chunks <- readIORef ior
            L.fromChunks (reverse chunks) `shouldBe` B.toLazyByteString bss
| phadej/streaming-commons | test/Data/Streaming/ByteString/BuilderSpec.hs | mit | 5,520 | 0 | 24 | 1,779 | 1,563 | 793 | 770 | 124 | 4 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Module : Network.AWS.SQS.ChangeMessageVisibilityBatch
-- Copyright : (c) 2013-2014 Brendan Hay <brendan.g.hay@gmail.com>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- | Changes the visibility timeout of multiple messages. This is a batch version
-- of 'ChangeMessageVisibility'. The result of the action on each message is
-- reported individually in the response. You can send up to 10 'ChangeMessageVisibility' requests with each 'ChangeMessageVisibilityBatch' action.
--
-- Because the batch request can result in a combination of successful and
-- unsuccessful actions, you should check for batch errors even when the call
-- returns an HTTP status code of 200. Some API actions take lists of
-- parameters. These lists are specified using the 'param.n' notation. Values of 'n'
-- are integers starting from 1. For example, a parameter list with two elements
-- looks like this: '&Attribute.1=this'
--
-- '&Attribute.2=that'
--
-- <http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ChangeMessageVisibilityBatch.html>
module Network.AWS.SQS.ChangeMessageVisibilityBatch
(
-- * Request
ChangeMessageVisibilityBatch
-- ** Request constructor
, changeMessageVisibilityBatch
-- ** Request lenses
, cmvbEntries
, cmvbQueueUrl
-- * Response
, ChangeMessageVisibilityBatchResponse
-- ** Response constructor
, changeMessageVisibilityBatchResponse
-- ** Response lenses
, cmvbrFailed
, cmvbrSuccessful
) where
import Network.AWS.Prelude
import Network.AWS.Request.Query
import Network.AWS.SQS.Types
import qualified GHC.Exts
-- Request payload: the target queue URL plus the batch entries.
data ChangeMessageVisibilityBatch = ChangeMessageVisibilityBatch
    { _cmvbEntries  :: List "member" ChangeMessageVisibilityBatchRequestEntry
    , _cmvbQueueUrl :: Text
    } deriving (Eq, Read, Show)

-- | 'ChangeMessageVisibilityBatch' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'cmvbEntries' @::@ ['ChangeMessageVisibilityBatchRequestEntry']
--
-- * 'cmvbQueueUrl' @::@ 'Text'
--
-- Entries start out empty; add them via the 'cmvbEntries' lens.
changeMessageVisibilityBatch :: Text -- ^ 'cmvbQueueUrl'
                             -> ChangeMessageVisibilityBatch
changeMessageVisibilityBatch p1 = ChangeMessageVisibilityBatch
    { _cmvbQueueUrl = p1
    , _cmvbEntries  = mempty
    }

-- | A list of receipt handles of the messages for which the visibility timeout
-- must be changed. (The '_List' iso exposes the wrapped list as a plain list.)
cmvbEntries :: Lens' ChangeMessageVisibilityBatch [ChangeMessageVisibilityBatchRequestEntry]
cmvbEntries = lens _cmvbEntries (\s a -> s { _cmvbEntries = a }) . _List

-- | The URL of the Amazon SQS queue to take action on.
cmvbQueueUrl :: Lens' ChangeMessageVisibilityBatch Text
cmvbQueueUrl = lens _cmvbQueueUrl (\s a -> s { _cmvbQueueUrl = a })
-- Response payload: per-message outcomes, split into failed and successful
-- entries (batch calls can partially succeed — see the module description).
data ChangeMessageVisibilityBatchResponse = ChangeMessageVisibilityBatchResponse
    { _cmvbrFailed     :: List "member" BatchResultErrorEntry
    , _cmvbrSuccessful :: List "member" ChangeMessageVisibilityBatchResultEntry
    } deriving (Eq, Read, Show)

-- | 'ChangeMessageVisibilityBatchResponse' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'cmvbrFailed' @::@ ['BatchResultErrorEntry']
--
-- * 'cmvbrSuccessful' @::@ ['ChangeMessageVisibilityBatchResultEntry']
--
-- Both lists start out empty.
changeMessageVisibilityBatchResponse :: ChangeMessageVisibilityBatchResponse
changeMessageVisibilityBatchResponse = ChangeMessageVisibilityBatchResponse
    { _cmvbrSuccessful = mempty
    , _cmvbrFailed     = mempty
    }

-- | A list of 'BatchResultErrorEntry' items.
cmvbrFailed :: Lens' ChangeMessageVisibilityBatchResponse [BatchResultErrorEntry]
cmvbrFailed = lens _cmvbrFailed (\s a -> s { _cmvbrFailed = a }) . _List

-- | A list of 'ChangeMessageVisibilityBatchResultEntry' items.
cmvbrSuccessful :: Lens' ChangeMessageVisibilityBatchResponse [ChangeMessageVisibilityBatchResultEntry]
cmvbrSuccessful = lens _cmvbrSuccessful (\s a -> s { _cmvbrSuccessful = a }) . _List
-- Serialisation/deserialisation instances for the request/response pair.

instance ToPath ChangeMessageVisibilityBatch where
    toPath = const "/"

instance ToQuery ChangeMessageVisibilityBatch where
    toQuery ChangeMessageVisibilityBatch{..} = mconcat
        [ toQuery _cmvbEntries
        , "QueueUrl" =? _cmvbQueueUrl
        ]

instance ToHeaders ChangeMessageVisibilityBatch

instance AWSRequest ChangeMessageVisibilityBatch where
    type Sv ChangeMessageVisibilityBatch = SQS
    type Rs ChangeMessageVisibilityBatch = ChangeMessageVisibilityBatchResponse

    request  = post "ChangeMessageVisibilityBatch"
    response = xmlResponse

instance FromXML ChangeMessageVisibilityBatchResponse where
    -- Both lists are parsed from the same "ChangeMessageVisibilityBatchResult"
    -- element; the first parseXML fills _cmvbrFailed and the second
    -- _cmvbrSuccessful, following the record's field order.
    parseXML = withElement "ChangeMessageVisibilityBatchResult" $ \x -> ChangeMessageVisibilityBatchResponse
        <$> parseXML x
        <*> parseXML x
| romanb/amazonka | amazonka-sqs/gen/Network/AWS/SQS/ChangeMessageVisibilityBatch.hs | mpl-2.0 | 5,585 | 0 | 10 | 1,019 | 577 | 351 | 226 | 65 | 1 |
module Oracles.Setting (
configFile, Setting (..), SettingList (..), setting, settingList, getSetting,
getSettingList, anyTargetPlatform, anyTargetOs, anyTargetArch, anyHostOs,
ghcWithInterpreter, ghcEnableTablesNextToCode, useLibFFIForAdjustors,
ghcCanonVersion, cmdLineLengthLimit, iosHost, osxHost, windowsHost,
topDirectory, libsuf
) where
import Hadrian.Expression
import Hadrian.Oracles.TextFile
import Hadrian.Oracles.Path
import Base
-- | Each 'Setting' comes from the file @hadrian/cfg/system.config@, generated
-- by the @configure@ script from the input file @hadrian/cfg/system.config.in@.
-- For example, the line
--
-- > target-os = mingw32
--
-- sets the value of the setting 'TargetOs'. The action 'setting' 'TargetOs'
-- looks up the value of the setting and returns the string @"mingw32"@,
-- tracking the result in the Shake database.
--
-- The constructor-to-key mapping lives in the 'setting' function below.
data Setting = BuildArch
             | BuildOs
             | BuildPlatform
             | BuildVendor
             | CcClangBackend
             | CcLlvmBackend
             | CursesLibDir
             | DynamicExtension
             | FfiIncludeDir
             | FfiLibDir
             | GhcMajorVersion
             | GhcMinorVersion
             | GhcPatchLevel
             | GhcVersion
             | GhcSourcePath
             | GmpIncludeDir
             | GmpLibDir
             | HostArch
             | HostOs
             | HostPlatform
             | HostVendor
             | IconvIncludeDir
             | IconvLibDir
             | LlvmTarget
             | ProjectGitCommitId
             | ProjectName
             | ProjectVersion
             | ProjectVersionInt
             | ProjectPatchLevel
             | ProjectPatchLevel1
             | ProjectPatchLevel2
             | SystemGhc
             | TargetArch
             | TargetOs
             | TargetPlatform
             | TargetPlatformFull
             | TargetVendor
-- TODO: Reduce the variety of similar flags (e.g. CPP and non-CPP versions).
-- | Each 'SettingList' comes from the file @hadrian/cfg/system.config@,
-- generated by the @configure@ script from the input file
-- @hadrian/cfg/system.config.in@. For example, the line
--
-- > hs-cpp-args = -E -undef -traditional
--
-- sets the value of 'HsCppArgs'. The action 'settingList' 'HsCppArgs' looks up
-- the value of the setting and returns the list of strings
-- @["-E", "-undef", "-traditional"]@, tracking the result in the Shake database.
--
-- The 'Stage'-indexed constructors select a stage-specific config key; see
-- 'settingList' for the exact key suffixing.
data SettingList = ConfCcArgs Stage
                 | ConfCppArgs Stage
                 | ConfGccLinkerArgs Stage
                 | ConfLdLinkerArgs Stage
                 | HsCppArgs
-- | Look up the value of a 'Setting' in @cfg/system.config@, tracking the
-- result. Each constructor maps to its kebab-case key in the config file.
setting :: Setting -> Action String
setting key = lookupValueOrError configFile $ case key of
    BuildArch          -> "build-arch"
    BuildOs            -> "build-os"
    BuildPlatform      -> "build-platform"
    BuildVendor        -> "build-vendor"
    CcClangBackend     -> "cc-clang-backend"
    CcLlvmBackend      -> "cc-llvm-backend"
    CursesLibDir       -> "curses-lib-dir"
    DynamicExtension   -> "dynamic-extension"
    FfiIncludeDir      -> "ffi-include-dir"
    FfiLibDir          -> "ffi-lib-dir"
    GhcMajorVersion    -> "ghc-major-version"
    GhcMinorVersion    -> "ghc-minor-version"
    GhcPatchLevel      -> "ghc-patch-level"
    GhcVersion         -> "ghc-version"
    GhcSourcePath      -> "ghc-source-path"
    GmpIncludeDir      -> "gmp-include-dir"
    GmpLibDir          -> "gmp-lib-dir"
    HostArch           -> "host-arch"
    HostOs             -> "host-os"
    HostPlatform       -> "host-platform"
    HostVendor         -> "host-vendor"
    IconvIncludeDir    -> "iconv-include-dir"
    IconvLibDir        -> "iconv-lib-dir"
    LlvmTarget         -> "llvm-target"
    ProjectGitCommitId -> "project-git-commit-id"
    ProjectName        -> "project-name"
    ProjectVersion     -> "project-version"
    ProjectVersionInt  -> "project-version-int"
    ProjectPatchLevel  -> "project-patch-level"
    ProjectPatchLevel1 -> "project-patch-level1"
    ProjectPatchLevel2 -> "project-patch-level2"
    SystemGhc          -> "system-ghc"
    TargetArch         -> "target-arch"
    TargetOs           -> "target-os"
    TargetPlatform     -> "target-platform"
    TargetPlatformFull -> "target-platform-full"
    TargetVendor       -> "target-vendor"
-- | Look up the value of a 'SettingList' in @cfg/system.config@, tracking the
-- result. The raw config value is split on whitespace via 'words'; the
-- stage-indexed keys get the stage name appended as a suffix.
settingList :: SettingList -> Action [String]
settingList key = fmap words $ lookupValueOrError configFile $ case key of
    ConfCcArgs        stage -> "conf-cc-args-"         ++ stageString stage
    ConfCppArgs       stage -> "conf-cpp-args-"        ++ stageString stage
    ConfGccLinkerArgs stage -> "conf-gcc-linker-args-" ++ stageString stage
    ConfLdLinkerArgs  stage -> "conf-ld-linker-args-"  ++ stageString stage
    HsCppArgs               -> "hs-cpp-args"
-- | An expression that looks up the value of a 'Setting' in
-- @cfg/system.config@, tracking the result.
getSetting :: Setting -> Expr c b String
getSetting key = expr (setting key)

-- | An expression that looks up the value of a 'SettingList' in
-- @cfg/system.config@, tracking the result.
getSettingList :: SettingList -> Args c b
getSettingList key = expr (settingList key)

-- | Check whether the value of a 'Setting' matches one of the given strings.
matchSetting :: Setting -> [String] -> Action Bool
matchSetting key values = fmap (`elem` values) (setting key)
-- | Check whether the full target platform setting ('TargetPlatformFull')
-- matches one of the given strings.
anyTargetPlatform :: [String] -> Action Bool
anyTargetPlatform = matchSetting TargetPlatformFull

-- | Check whether the target OS setting matches one of the given strings.
anyTargetOs :: [String] -> Action Bool
anyTargetOs = matchSetting TargetOs

-- | Check whether the target architecture setting matches one of the given
-- strings.
anyTargetArch :: [String] -> Action Bool
anyTargetArch = matchSetting TargetArch

-- | Check whether the host OS setting matches one of the given strings.
anyHostOs :: [String] -> Action Bool
anyHostOs = matchSetting HostOs

-- | Check whether the host OS setting is set to @"ios"@.
iosHost :: Action Bool
iosHost = anyHostOs ["ios"]

-- | Check whether the host OS setting is set to @"darwin"@.
osxHost :: Action Bool
osxHost = anyHostOs ["darwin"]

-- | Check whether the host OS setting is set to @"mingw32"@ or @"cygwin32"@,
-- i.e. whether we are building on Windows.
windowsHost :: Action Bool
windowsHost = anyHostOs ["mingw32", "cygwin32"]
-- | Check whether the target supports GHCi: both the target OS and the
-- target architecture must be on the supported lists below.
ghcWithInterpreter :: Action Bool
ghcWithInterpreter = do
    goodOs   <- anyTargetOs supportedOses
    goodArch <- anyTargetArch supportedArchs
    return (goodOs && goodArch)
  where
    supportedOses  = [ "mingw32", "cygwin32", "linux", "solaris2"
                     , "freebsd", "dragonfly", "netbsd", "openbsd"
                     , "darwin", "kfreebsdgnu" ]
    supportedArchs = [ "i386", "x86_64", "powerpc", "sparc"
                     , "sparc64", "arm" ]
-- | Check whether the target architecture supports placing info tables next to
-- code. See: https://ghc.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/HeapObjects#TABLES_NEXT_TO_CODE.
-- The listed architectures are the exceptions that do NOT support it.
ghcEnableTablesNextToCode :: Action Bool
ghcEnableTablesNextToCode = notM $ anyTargetArch ["ia64", "powerpc64", "powerpc64le"]

-- | Check to use @libffi@ for adjustors: used on every target architecture
-- except i386 and x86_64.
useLibFFIForAdjustors :: Action Bool
useLibFFIForAdjustors = notM $ anyTargetArch ["i386", "x86_64"]
-- | Canonicalised GHC version number, used for integer version comparisons.
-- The minor version is padded to two digits with a leading zero when needed,
-- e.g. major 8 / minor 2 yields "802".
ghcCanonVersion :: Action String
ghcCanonVersion = do
    major <- setting GhcMajorVersion
    minor <- setting GhcMinorVersion
    let padded = if length minor == 1 then '0' : minor else minor
    return (major ++ padded)
-- | Path to the GHC source tree, taken from the 'GhcSourcePath' setting.
-- NOTE(review): 'fixAbsolutePathOnWindows' presumably canonicalises the
-- path for Windows hosts — confirm against Hadrian.Oracles.Path.
topDirectory :: Action FilePath
topDirectory = fixAbsolutePathOnWindows =<< setting GhcSourcePath
-- | The file suffix used for libraries of a given build 'Way'. For example,
-- @_p.a@ corresponds to a static profiled library, and @-ghc7.11.20141222.so@
-- is a dynamic vanilla library. Why do we need GHC version number in the
-- dynamic suffix? Here is a possible reason: dynamic libraries are placed in a
-- single giant directory in the load path of the dynamic linker, and hence we
-- must distinguish different versions of GHC. In contrast, static libraries
-- live in their own per-package directory and hence do not need a unique
-- filename. We also need to respect the system's dynamic extension, e.g. @.dll@
-- or @.so@.
libsuf :: Way -> Action String
libsuf way
    | not (wayUnit Dynamic way) = return (waySuffix way ++ ".a") -- e.g., _p.a
    | otherwise = do
        extension <- setting DynamicExtension  -- e.g., .dll or .so
        version   <- setting ProjectVersion    -- e.g., 7.11.20141222
        -- Drop the Dynamic unit before computing the way suffix, since the
        -- dynamic-ness is already conveyed by "-ghc<version><extension>".
        let suffix = waySuffix (removeWayUnit Dynamic way)
        return ("-ghc" ++ version ++ suffix ++ extension)
| snowleopard/shaking-up-ghc | src/Oracles/Setting.hs | bsd-3-clause | 9,078 | 0 | 13 | 2,227 | 1,260 | 697 | 563 | 146 | 37 |
module Data.Graph.Inductive.Internal.Queue
    (Queue(..), mkQueue, queuePut, queuePutList, queueGet, queueEmpty)
    where

-- | A FIFO queue in the classic two-list representation: items are pushed
-- onto the first ("in") list and popped from the second ("out") list; when
-- the out list runs dry, the in list is reversed onto it.
data Queue a = MkQueue [a] [a]

-- | The empty queue.
mkQueue :: Queue a
mkQueue = MkQueue [] []

-- | Add an item at the back of the queue.
queuePut :: a -> Queue a -> Queue a
queuePut item (MkQueue ins outs) = MkQueue (item : ins) outs

-- | Add several items at the back of the queue, in list order.
queuePutList :: [a] -> Queue a -> Queue a
queuePutList [] q = q
queuePutList (x : xs) q = queuePutList xs (queuePut x q)

-- | Remove and return the front item. Raises an 'error' on an empty queue.
-- (Previously the empty case looped forever: reversing the empty "in" list
-- and recursing made no progress.)
queueGet :: Queue a -> (a, Queue a)
queueGet (MkQueue [] []) = error "Data.Graph.Inductive.Internal.Queue.queueGet: empty queue"
queueGet (MkQueue ins (item : rest)) = (item, MkQueue ins rest)
queueGet (MkQueue ins []) = queueGet (MkQueue [] (reverse ins))

-- | Is the queue empty?
queueEmpty :: Queue a -> Bool
queueEmpty (MkQueue ins outs) = (null ins) && (null outs)
| ckaestne/CIDE | other/CaseStudies/fgl/CIDEfgl/Data/Graph/Inductive/Internal/Queue.hs | gpl-3.0 | 737 | 0 | 9 | 178 | 336 | 184 | 152 | 15 | 1 |
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
-}
-- Environments keyed by 'Var' (and its Id/TyVar flavours), in both
-- non-deterministic ('VarEnv') and deterministic ('DVarEnv') forms,
-- together with in-scope sets, the 'RnEnv2' renaming environment, and
-- 'TidyEnv'.
module VarEnv (
        -- * Var, Id and TyVar environments (maps)
        VarEnv, IdEnv, TyVarEnv, CoVarEnv, TyCoVarEnv,

        -- ** Manipulating these environments
        emptyVarEnv, unitVarEnv, mkVarEnv, mkVarEnv_Directly,
        elemVarEnv, disjointVarEnv,
        extendVarEnv, extendVarEnv_C, extendVarEnv_Acc, extendVarEnv_Directly,
        extendVarEnvList,
        plusVarEnv, plusVarEnv_C, plusVarEnv_CD, plusMaybeVarEnv_C,
        plusVarEnvList, alterVarEnv,
        delVarEnvList, delVarEnv, delVarEnv_Directly,
        minusVarEnv, intersectsVarEnv,
        lookupVarEnv, lookupVarEnv_NF, lookupWithDefaultVarEnv,
        mapVarEnv, zipVarEnv,
        modifyVarEnv, modifyVarEnv_Directly,
        isEmptyVarEnv,
        elemVarEnvByKey, lookupVarEnv_Directly,
        filterVarEnv, filterVarEnv_Directly, restrictVarEnv,
        partitionVarEnv,

        -- * Deterministic Var environments (maps)
        DVarEnv, DIdEnv, DTyVarEnv,

        -- ** Manipulating these environments
        emptyDVarEnv, mkDVarEnv,
        dVarEnvElts,
        extendDVarEnv, extendDVarEnv_C,
        extendDVarEnvList,
        lookupDVarEnv, elemDVarEnv,
        isEmptyDVarEnv, foldDVarEnv,
        mapDVarEnv, filterDVarEnv,
        modifyDVarEnv,
        alterDVarEnv,
        plusDVarEnv, plusDVarEnv_C,
        unitDVarEnv,
        delDVarEnv,
        delDVarEnvList,
        minusDVarEnv,
        partitionDVarEnv,
        anyDVarEnv,

        -- * The InScopeSet type
        InScopeSet,

        -- ** Operations on InScopeSets
        emptyInScopeSet, mkInScopeSet, delInScopeSet,
        extendInScopeSet, extendInScopeSetList, extendInScopeSetSet,
        getInScopeVars, lookupInScope, lookupInScope_Directly,
        unionInScope, elemInScopeSet, uniqAway,
        varSetInScope,

        -- * The RnEnv2 type
        RnEnv2,

        -- ** Operations on RnEnv2s
        mkRnEnv2, rnBndr2, rnBndrs2, rnBndr2_var,
        rnOccL, rnOccR, inRnEnvL, inRnEnvR, rnOccL_maybe, rnOccR_maybe,
        rnBndrL, rnBndrR, nukeRnEnvL, nukeRnEnvR, rnSwap,
        delBndrL, delBndrR, delBndrsL, delBndrsR,
        addRnInScopeSet,
        rnEtaL, rnEtaR,
        rnInScope, rnInScopeSet, lookupRnInScope,
        rnEnvL, rnEnvR,

        -- * TidyEnv and its operation
        TidyEnv,
        emptyTidyEnv
    ) where
import GhcPrelude
import OccName
import Var
import VarSet
import UniqSet
import UniqFM
import UniqDFM
import Unique
import Util
import Maybes
import Outputable
{-
************************************************************************
* *
In-scope sets
* *
************************************************************************
-}
-- | A set of variables that are in scope at some point
-- "Secrets of the Glasgow Haskell Compiler inliner" Section 3.2 provides
-- the motivation for this abstraction.
-- See "Secrets of the GHC inliner" (cited above) for why lookups, not
-- just membership tests, are performed on this set.
data InScopeSet = InScope VarSet {-# UNPACK #-} !Int
        -- We store a VarSet here, but we use this for lookups rather than
        -- just membership tests. Typically the InScopeSet contains the
        -- canonical version of the variable (e.g. with an informative
        -- unfolding), so this lookup is useful.
        --
        -- The Int is a kind of hash-value used by uniqAway
        -- For example, it might be the size of the set
        -- INVARIANT: it's not zero; we use it as a multiplier in uniqAway
instance Outputable InScopeSet where
  ppr (InScope s _) =
    text "InScope" <+>
    braces (fsep (map (ppr . Var.varName) (nonDetEltsUniqSet s)))
      -- It's OK to use nonDetEltsUniqSet here because it's
      -- only for pretty printing
      -- In-scope sets get big, and with -dppr-debug
      -- the output is overwhelming
-- | The empty in-scope set.  The uniqAway hash starts at 1: it must never
-- be zero (see the INVARIANT on 'InScopeSet').
emptyInScopeSet :: InScopeSet
emptyInScopeSet = InScope emptyVarSet 1

-- | Project out the underlying 'VarSet'.
getInScopeVars :: InScopeSet -> VarSet
getInScopeVars (InScope vs _) = vs

-- | Build an 'InScopeSet' from a 'VarSet', initialising the hash to 1.
mkInScopeSet :: VarSet -> InScopeSet
mkInScopeSet in_scope = InScope in_scope 1
-- | Add a single variable to the in-scope set, bumping the uniqAway hash
-- so it stays non-zero and varies with the set's history.
extendInScopeSet :: InScopeSet -> Var -> InScopeSet
extendInScopeSet (InScope in_scope n) v
  = InScope (extendVarSet in_scope v) (n + 1)

-- | Add a list of variables to the in-scope set; the hash grows by the
-- number of variables added.
extendInScopeSetList :: InScopeSet -> [Var] -> InScopeSet
extendInScopeSetList (InScope in_scope n) vs
  = InScope (foldl extendVarSet in_scope vs)  -- eta-reduced redundant lambda
            (n + length vs)
-- | Union a whole 'VarSet' into the in-scope set; the hash grows by the
-- size of the set added.
extendInScopeSetSet :: InScopeSet -> VarSet -> InScopeSet
extendInScopeSetSet (InScope in_scope n) vs
  = InScope (in_scope `unionVarSet` vs) (n + sizeUniqSet vs)

-- | Remove a variable.  The hash is deliberately left unchanged.
delInScopeSet :: InScopeSet -> Var -> InScopeSet
delInScopeSet (InScope in_scope n) v = InScope (in_scope `delVarSet` v) n

-- | Membership test.
elemInScopeSet :: Var -> InScopeSet -> Bool
elemInScopeSet v (InScope in_scope _) = v `elemVarSet` in_scope

-- | Look up a variable in the 'InScopeSet'. This lets you map from
-- the variable's identity (unique) to its full value.
lookupInScope :: InScopeSet -> Var -> Maybe Var
lookupInScope (InScope in_scope _) v = lookupVarSet in_scope v

-- | As 'lookupInScope', but keyed directly on a 'Unique'.
lookupInScope_Directly :: InScopeSet -> Unique -> Maybe Var
lookupInScope_Directly (InScope in_scope _) uniq
  = lookupVarSet_Directly in_scope uniq

-- | Union of two in-scope sets; the hash of the second argument is kept.
unionInScope :: InScopeSet -> InScopeSet -> InScopeSet
unionInScope (InScope s1 _) (InScope s2 n2)
  = InScope (s1 `unionVarSet` s2) n2

-- | Is every variable of the set in scope?
varSetInScope :: VarSet -> InScopeSet -> Bool
varSetInScope vars (InScope s1 _) = vars `subVarSet` s1
-- | @uniqAway in_scope v@ finds a unique that is not used in the
-- in-scope set, and gives that to v.
uniqAway :: InScopeSet -> Var -> Var
-- It starts with v's current unique, of course, in the hope that it won't
-- have to change, and thereafter uses a combination of that and the hash-code
-- found in the in-scope set
uniqAway in_scope var
  | var `elemInScopeSet` in_scope = uniqAway' in_scope var -- Make a new one
  | otherwise                     = var                    -- Nothing to do
uniqAway' :: InScopeSet -> Var -> Var
-- This one *always* makes up a new variable
uniqAway' (InScope set n) var
  = try 1
  where
    orig_unique = getUnique var
    -- Derive candidate uniques until one is not in the set.  The counter k
    -- both varies the candidate and bounds the search: in debug builds we
    -- panic after 1000 tries (symptom of a bad hash n) and trace after 3.
    try k
      | debugIsOn && (k > 1000)
      = pprPanic "uniqAway loop:" msg
      | uniq `elemVarSetByKey` set = try (k + 1)
      | k > 3
      = pprTraceDebug "uniqAway:" msg
        setVarUnique var uniq
      | otherwise = setVarUnique var uniq
      where
        msg  = ppr k <+> text "tries" <+> ppr var <+> int n
        uniq = deriveUnique orig_unique (n * k)
{-
************************************************************************
* *
Dual renaming
* *
************************************************************************
-}
-- | Rename Environment 2
--
-- When we are comparing (or matching) types or terms, we are faced with
-- \"going under\" corresponding binders. E.g. when comparing:
--
-- > \x. e1 ~ \y. e2
--
-- Basically we want to rename [@x@ -> @y@] or [@y@ -> @x@], but there are lots of
-- things we must be careful of. In particular, @x@ might be free in @e2@, or
-- y in @e1@. So the idea is that we come up with a fresh binder that is free
-- in neither, and rename @x@ and @y@ respectively. That means we must maintain:
--
-- 1. A renaming for the left-hand expression
--
-- 2. A renaming for the right-hand expressions
--
-- 3. An in-scope set
--
-- Furthermore, when matching, we want to be able to have an 'occurs check',
-- to prevent:
--
-- > \x. f ~ \y. y
--
-- matching with [@f@ -> @y@]. So for each expression we want to know that set of
-- locally-bound variables. That is precisely the domain of the mappings 1.
-- and 2., but we must ensure that we always extend the mappings as we go in.
--
-- All of this information is bundled up in the 'RnEnv2'
-- See the long comment above for the motivation of 'RnEnv2'.
data RnEnv2
  = RV2 { envL     :: VarEnv Var        -- Renaming for Left term
        , envR     :: VarEnv Var        -- Renaming for Right term
        , in_scope :: InScopeSet }      -- In scope in left or right terms
-- The renamings envL and envR are *guaranteed* to contain a binding
-- for every variable bound as we go into the term, even if it is not
-- renamed.  That way we can ask what variables are locally bound
-- (inRnEnvL, inRnEnvR)

-- | Make a fresh 'RnEnv2' with empty renamings and the given in-scope set.
mkRnEnv2 :: InScopeSet -> RnEnv2
mkRnEnv2 vars = RV2 { envL     = emptyVarEnv
                    , envR     = emptyVarEnv
                    , in_scope = vars }

-- | Extend the shared in-scope set (no-op for an empty 'VarSet').
addRnInScopeSet :: RnEnv2 -> VarSet -> RnEnv2
addRnInScopeSet env vs
  | isEmptyVarSet vs = env
  | otherwise        = env { in_scope = extendInScopeSetSet (in_scope env) vs }

-- | Is the variable in scope (on either side)?
rnInScope :: Var -> RnEnv2 -> Bool
rnInScope x env = x `elemInScopeSet` in_scope env

rnInScopeSet :: RnEnv2 -> InScopeSet
rnInScopeSet = in_scope

-- | Retrieve the left mapping
rnEnvL :: RnEnv2 -> VarEnv Var
rnEnvL = envL

-- | Retrieve the right mapping
rnEnvR :: RnEnv2 -> VarEnv Var
rnEnvR = envR
rnBndrs2 :: RnEnv2 -> [Var] -> [Var] -> RnEnv2
-- ^ Applies 'rnBndr2' to several variables: the two variable lists must be of equal length
rnBndrs2 env bsL bsR = foldl2 rnBndr2 env bsL bsR

rnBndr2 :: RnEnv2 -> Var -> Var -> RnEnv2
-- ^ @rnBndr2 env bL bR@ goes under a binder @bL@ in the Left term,
-- and binder @bR@ in the Right term.
-- It finds a new binder, @new_b@,
-- and returns an environment mapping @bL -> new_b@ and @bR -> new_b@
rnBndr2 env bL bR = fst $ rnBndr2_var env bL bR

rnBndr2_var :: RnEnv2 -> Var -> Var -> (RnEnv2, Var)
-- ^ Similar to 'rnBndr2' but returns the new variable as well as the
-- new environment
rnBndr2_var (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL bR
  = (RV2 { envL     = extendVarEnv envL bL new_b   -- See Note
         , envR     = extendVarEnv envR bR new_b   -- [Rebinding]
         , in_scope = extendInScopeSet in_scope new_b }, new_b)
  where
    -- Find a new binder not in scope in either term
    new_b | not (bL `elemInScopeSet` in_scope) = bL
          | not (bR `elemInScopeSet` in_scope) = bR
          | otherwise                          = uniqAway' in_scope bL

    -- Note [Rebinding]
    -- If the new var is the same as the old one, note that
    -- the extendVarEnv *deletes* any current renaming
    -- E.g.   (\x. \x. ...) ~ (\y. \z. ...)
    --
    --   Inside \x  \y      { [x->y], [y->y],       {y} }
    --          \x  \z      { [x->x], [y->y, z->x], {y,x} }

rnBndrL :: RnEnv2 -> Var -> (RnEnv2, Var)
-- ^ Similar to 'rnBndr2' but used when there's a binder on the left
-- side only.
rnBndrL (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL
  = (RV2 { envL     = extendVarEnv envL bL new_b
         , envR     = envR
         , in_scope = extendInScopeSet in_scope new_b }, new_b)
  where
    new_b = uniqAway in_scope bL

rnBndrR :: RnEnv2 -> Var -> (RnEnv2, Var)
-- ^ Similar to 'rnBndr2' but used when there's a binder on the right
-- side only.
rnBndrR (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bR
  = (RV2 { envR     = extendVarEnv envR bR new_b
         , envL     = envL
         , in_scope = extendInScopeSet in_scope new_b }, new_b)
  where
    new_b = uniqAway in_scope bR

rnEtaL :: RnEnv2 -> Var -> (RnEnv2, Var)
-- ^ Similar to 'rnBndrL' but used for eta expansion
-- See Note [Eta expansion]
rnEtaL (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL
  = (RV2 { envL     = extendVarEnv envL bL new_b
         , envR     = extendVarEnv envR new_b new_b     -- Note [Eta expansion]
         , in_scope = extendInScopeSet in_scope new_b }, new_b)
  where
    new_b = uniqAway in_scope bL

rnEtaR :: RnEnv2 -> Var -> (RnEnv2, Var)
-- ^ Similar to 'rnBndr2' but used for eta expansion
-- See Note [Eta expansion]
rnEtaR (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bR
  = (RV2 { envL     = extendVarEnv envL new_b new_b     -- Note [Eta expansion]
         , envR     = extendVarEnv envR bR new_b
         , in_scope = extendInScopeSet in_scope new_b }, new_b)
  where
    new_b = uniqAway in_scope bR
-- Deleting a binder from one side's renaming still records it as
-- in scope, so later 'uniqAway' calls avoid it.
delBndrL, delBndrR :: RnEnv2 -> Var -> RnEnv2
delBndrL rn@(RV2 { envL = env, in_scope = in_scope }) v
  = rn { envL = env `delVarEnv` v, in_scope = in_scope `extendInScopeSet` v }
delBndrR rn@(RV2 { envR = env, in_scope = in_scope }) v
  = rn { envR = env `delVarEnv` v, in_scope = in_scope `extendInScopeSet` v }

delBndrsL, delBndrsR :: RnEnv2 -> [Var] -> RnEnv2
delBndrsL rn@(RV2 { envL = env, in_scope = in_scope }) v
  = rn { envL = env `delVarEnvList` v, in_scope = in_scope `extendInScopeSetList` v }
delBndrsR rn@(RV2 { envR = env, in_scope = in_scope }) v
  = rn { envR = env `delVarEnvList` v, in_scope = in_scope `extendInScopeSetList` v }

rnOccL, rnOccR :: RnEnv2 -> Var -> Var
-- ^ Look up the renaming of an occurrence in the left or right term
-- (the variable itself if it has no renaming)
rnOccL (RV2 { envL = env }) v = lookupVarEnv env v `orElse` v
rnOccR (RV2 { envR = env }) v = lookupVarEnv env v `orElse` v

rnOccL_maybe, rnOccR_maybe :: RnEnv2 -> Var -> Maybe Var
-- ^ Look up the renaming of an occurrence in the left or right term
rnOccL_maybe (RV2 { envL = env }) v = lookupVarEnv env v
rnOccR_maybe (RV2 { envR = env }) v = lookupVarEnv env v

inRnEnvL, inRnEnvR :: RnEnv2 -> Var -> Bool
-- ^ Tells whether a variable is locally bound
inRnEnvL (RV2 { envL = env }) v = v `elemVarEnv` env
inRnEnvR (RV2 { envR = env }) v = v `elemVarEnv` env

-- | The in-scope (canonical) version of the variable, or the variable itself.
lookupRnInScope :: RnEnv2 -> Var -> Var
lookupRnInScope env v = lookupInScope (in_scope env) v `orElse` v

nukeRnEnvL, nukeRnEnvR :: RnEnv2 -> RnEnv2
-- ^ Wipe the left or right side renaming
nukeRnEnvL env = env { envL = emptyVarEnv }
nukeRnEnvR env = env { envR = emptyVarEnv }

rnSwap :: RnEnv2 -> RnEnv2
-- ^ swap the meaning of left and right
rnSwap (RV2 { envL = envL, envR = envR, in_scope = in_scope })
  = RV2 { envL = envR, envR = envL, in_scope = in_scope }
{-
Note [Eta expansion]
~~~~~~~~~~~~~~~~~~~~
When matching
(\x.M) ~ N
we rename x to x' with, where x' is not in scope in
either term. Then we want to behave as if we'd seen
(\x'.M) ~ (\x'.N x')
Since x' isn't in scope in N, the form (\x'. N x') doesn't
capture any variables in N. But we must nevertheless extend
the envR with a binding [x' -> x'], to support the occurs check.
For example, if we don't do this, we can get silly matches like
forall a. (\y.a) ~ v
succeeding with [a -> v y], which is bogus of course.
************************************************************************
* *
Tidying
* *
************************************************************************
-}
-- | Tidy Environment
--
-- When tidying up print names, we keep a mapping of in-scope occ-names
-- (the 'TidyOccEnv') and a Var-to-Var of the current renamings
-- The TidyOccEnv tracks occ-names already in use; the VarEnv maps each
-- tidied variable to its new version.
type TidyEnv = (TidyOccEnv, VarEnv Var)

emptyTidyEnv :: TidyEnv
emptyTidyEnv = (emptyTidyOccEnv, emptyVarEnv)
{-
************************************************************************
* *
\subsection{@VarEnv@s}
* *
************************************************************************
-}
-- | Variable Environment
type VarEnv elt = UniqFM elt
-- | Identifier Environment
type IdEnv elt = VarEnv elt
-- | Type Variable Environment
type TyVarEnv elt = VarEnv elt
-- | Type or Coercion Variable Environment
type TyCoVarEnv elt = VarEnv elt
-- | Coercion Variable Environment
type CoVarEnv elt = VarEnv elt
--------------------------------------------------------------------
-- Signatures for the VarEnv operations.  Each is a thin wrapper over
-- the corresponding UniqFM function; the "_Directly" variants are
-- keyed on a 'Unique' instead of a 'Var'.
--------------------------------------------------------------------
emptyVarEnv :: VarEnv a
mkVarEnv :: [(Var, a)] -> VarEnv a
mkVarEnv_Directly :: [(Unique, a)] -> VarEnv a
zipVarEnv :: [Var] -> [a] -> VarEnv a
unitVarEnv :: Var -> a -> VarEnv a
alterVarEnv :: (Maybe a -> Maybe a) -> VarEnv a -> Var -> VarEnv a
extendVarEnv :: VarEnv a -> Var -> a -> VarEnv a
extendVarEnv_C :: (a->a->a) -> VarEnv a -> Var -> a -> VarEnv a
extendVarEnv_Acc :: (a->b->b) -> (a->b) -> VarEnv b -> Var -> a -> VarEnv b
extendVarEnv_Directly :: VarEnv a -> Unique -> a -> VarEnv a
plusVarEnv :: VarEnv a -> VarEnv a -> VarEnv a
plusVarEnvList :: [VarEnv a] -> VarEnv a
extendVarEnvList :: VarEnv a -> [(Var, a)] -> VarEnv a
lookupVarEnv_Directly :: VarEnv a -> Unique -> Maybe a
filterVarEnv_Directly :: (Unique -> a -> Bool) -> VarEnv a -> VarEnv a
delVarEnv_Directly :: VarEnv a -> Unique -> VarEnv a
partitionVarEnv :: (a -> Bool) -> VarEnv a -> (VarEnv a, VarEnv a)
restrictVarEnv :: VarEnv a -> VarSet -> VarEnv a
delVarEnvList :: VarEnv a -> [Var] -> VarEnv a
delVarEnv :: VarEnv a -> Var -> VarEnv a
minusVarEnv :: VarEnv a -> VarEnv b -> VarEnv a
intersectsVarEnv :: VarEnv a -> VarEnv a -> Bool
plusVarEnv_C :: (a -> a -> a) -> VarEnv a -> VarEnv a -> VarEnv a
plusVarEnv_CD :: (a -> a -> a) -> VarEnv a -> a -> VarEnv a -> a -> VarEnv a
plusMaybeVarEnv_C :: (a -> a -> Maybe a) -> VarEnv a -> VarEnv a -> VarEnv a
mapVarEnv :: (a -> b) -> VarEnv a -> VarEnv b
modifyVarEnv :: (a -> a) -> VarEnv a -> Var -> VarEnv a
isEmptyVarEnv :: VarEnv a -> Bool
lookupVarEnv :: VarEnv a -> Var -> Maybe a
filterVarEnv :: (a -> Bool) -> VarEnv a -> VarEnv a
lookupVarEnv_NF :: VarEnv a -> Var -> a
lookupWithDefaultVarEnv :: VarEnv a -> a -> Var -> a
elemVarEnv :: Var -> VarEnv a -> Bool
elemVarEnvByKey :: Unique -> VarEnv a -> Bool
disjointVarEnv :: VarEnv a -> VarEnv a -> Bool

elemVarEnv = elemUFM
elemVarEnvByKey = elemUFM_Directly
disjointVarEnv = disjointUFM
alterVarEnv = alterUFM
extendVarEnv = addToUFM
extendVarEnv_C = addToUFM_C
extendVarEnv_Acc = addToUFM_Acc
extendVarEnv_Directly = addToUFM_Directly
extendVarEnvList = addListToUFM
plusVarEnv_C = plusUFM_C
plusVarEnv_CD = plusUFM_CD
plusMaybeVarEnv_C = plusMaybeUFM_C
delVarEnvList = delListFromUFM
delVarEnv = delFromUFM
minusVarEnv = minusUFM
intersectsVarEnv e1 e2 = not (isEmptyVarEnv (e1 `intersectUFM` e2))
plusVarEnv = plusUFM
plusVarEnvList = plusUFMList
lookupVarEnv = lookupUFM
filterVarEnv = filterUFM
lookupWithDefaultVarEnv = lookupWithDefaultUFM
mapVarEnv = mapUFM
mkVarEnv = listToUFM
mkVarEnv_Directly= listToUFM_Directly
emptyVarEnv = emptyUFM
unitVarEnv = unitUFM
isEmptyVarEnv = isNullUFM
lookupVarEnv_Directly = lookupUFM_Directly
filterVarEnv_Directly = filterUFM_Directly
delVarEnv_Directly = delFromUFM_Directly
partitionVarEnv = partitionUFM

-- Keep only the entries whose key is in the given VarSet.
restrictVarEnv env vs = filterVarEnv_Directly keep env
  where
    keep u _ = u `elemVarSetByKey` vs

-- The two lists must have equal length ('zipEqual' checks in debug builds).
zipVarEnv tyvars tys = mkVarEnv (zipEqual "zipVarEnv" tyvars tys)

-- Partial: panics when the variable is absent ("NF" = no failure expected).
lookupVarEnv_NF env id = case lookupVarEnv env id of
                           Just xx -> xx
                           Nothing -> panic "lookupVarEnv_NF: Nothing"
{-
@modifyVarEnv@: Look up a thing in the VarEnv,
then mash it with the modify function, and put it back.
-}
-- No-op when the key is absent; otherwise replace the value in place.
modifyVarEnv mangle_fn env key
  = case (lookupVarEnv env key) of
      Nothing -> env
      Just xx -> extendVarEnv env key (mangle_fn xx)

-- | As 'modifyVarEnv', but keyed directly on the 'Unique'.
modifyVarEnv_Directly :: (a -> a) -> UniqFM a -> Unique -> UniqFM a
modifyVarEnv_Directly mangle_fn env key
  = case (lookupUFM_Directly env key) of
      Nothing -> env
      Just xx -> addToUFM_Directly env key (mangle_fn xx)
-- Deterministic VarEnv
-- See Note [Deterministic UniqFM] in UniqDFM for explanation why we need
-- DVarEnv.
-- | Deterministic Variable Environment
type DVarEnv elt = UniqDFM elt

-- | Deterministic Identifier Environment
type DIdEnv elt = DVarEnv elt

-- | Deterministic Type Variable Environment
type DTyVarEnv elt = DVarEnv elt

--------------------------------------------------------------------
-- Thin wrappers over the corresponding UniqDFM functions; these
-- environments have deterministic fold/elts order.
--------------------------------------------------------------------

emptyDVarEnv :: DVarEnv a
emptyDVarEnv = emptyUDFM

dVarEnvElts :: DVarEnv a -> [a]
dVarEnvElts = eltsUDFM

mkDVarEnv :: [(Var, a)] -> DVarEnv a
mkDVarEnv = listToUDFM

extendDVarEnv :: DVarEnv a -> Var -> a -> DVarEnv a
extendDVarEnv = addToUDFM

minusDVarEnv :: DVarEnv a -> DVarEnv a' -> DVarEnv a
minusDVarEnv = minusUDFM

lookupDVarEnv :: DVarEnv a -> Var -> Maybe a
lookupDVarEnv = lookupUDFM

foldDVarEnv :: (a -> b -> b) -> b -> DVarEnv a -> b
foldDVarEnv = foldUDFM

mapDVarEnv :: (a -> b) -> DVarEnv a -> DVarEnv b
mapDVarEnv = mapUDFM

filterDVarEnv :: (a -> Bool) -> DVarEnv a -> DVarEnv a
filterDVarEnv = filterUDFM

alterDVarEnv :: (Maybe a -> Maybe a) -> DVarEnv a -> Var -> DVarEnv a
alterDVarEnv = alterUDFM

plusDVarEnv :: DVarEnv a -> DVarEnv a -> DVarEnv a
plusDVarEnv = plusUDFM

plusDVarEnv_C :: (a -> a -> a) -> DVarEnv a -> DVarEnv a -> DVarEnv a
plusDVarEnv_C = plusUDFM_C

unitDVarEnv :: Var -> a -> DVarEnv a
unitDVarEnv = unitUDFM

delDVarEnv :: DVarEnv a -> Var -> DVarEnv a
delDVarEnv = delFromUDFM

delDVarEnvList :: DVarEnv a -> [Var] -> DVarEnv a
delDVarEnvList = delListFromUDFM

isEmptyDVarEnv :: DVarEnv a -> Bool
isEmptyDVarEnv = isNullUDFM

elemDVarEnv :: Var -> DVarEnv a -> Bool
elemDVarEnv = elemUDFM

extendDVarEnv_C :: (a -> a -> a) -> DVarEnv a -> Var -> a -> DVarEnv a
extendDVarEnv_C = addToUDFM_C

-- | No-op when the key is absent; otherwise replace the value in place.
modifyDVarEnv :: (a -> a) -> DVarEnv a -> Var -> DVarEnv a
modifyDVarEnv mangle_fn env key
  = case (lookupDVarEnv env key) of
      Nothing -> env
      Just xx -> extendDVarEnv env key (mangle_fn xx)

partitionDVarEnv :: (a -> Bool) -> DVarEnv a -> (DVarEnv a, DVarEnv a)
partitionDVarEnv = partitionUDFM

extendDVarEnvList :: DVarEnv a -> [(Var, a)] -> DVarEnv a
extendDVarEnvList = addListToUDFM

anyDVarEnv :: (a -> Bool) -> DVarEnv a -> Bool
anyDVarEnv = anyUDFM
| shlevy/ghc | compiler/basicTypes/VarEnv.hs | bsd-3-clause | 21,826 | 0 | 14 | 5,628 | 4,857 | 2,684 | 2,173 | 337 | 2 |
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="ms-MY">
<title>OpenAPI Support Add-on</title>
<maps>
<homeID>openapi</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Index</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Search</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset> | thc202/zap-extensions | addOns/openapi/src/main/javahelp/org/zaproxy/zap/extension/openapi/resources/help_ms_MY/helpset_ms_MY.hs | apache-2.0 | 971 | 77 | 67 | 157 | 413 | 209 | 204 | -1 | -1 |
{-# LANGUAGE RoleAnnotations, RankNTypes, ScopedTypeVariables #-}
import Data.Coerce (coerce, Coercible)
import Data.Ord (Down)
newtype Age = Age Int deriving Show
type role Map nominal _
data Map a b = Map a b deriving Show
foo1 = coerce $ one :: ()
foo2 :: forall m. Monad m => m Age
foo2 = coerce $ (return one :: m Int)
foo3 = coerce $ Map one () :: Map Age ()
foo4 = coerce $ one :: Down Int
newtype Void = Void Void
foo5 = coerce :: Void -> ()
-- Do not test this; fills up memory
--newtype VoidBad a = VoidBad (VoidBad (a,a))
--foo5 = coerce :: (VoidBad ()) -> ()
-- This should fail with a context stack overflow
newtype Fix f = Fix (f (Fix f))
foo6 = coerce :: Fix (Either Int) -> Fix (Either Age)
foo7 = coerce :: Fix (Either Int) -> ()
one :: Int
one = 1
main = return ()
| frantisekfarka/ghc-dsi | testsuite/tests/typecheck/should_fail/TcCoercibleFail.hs | bsd-3-clause | 795 | 0 | 9 | 174 | 276 | 154 | 122 | 19 | 1 |
module State0 () where
import State
{-@ fresh :: ST <{\v -> (v >= 0)}, {\xx v -> ((xx>=0) && (v>=0))}> Int Int @-}
-- | Return the current counter and increment the stored state.
fresh :: ST Int Int
fresh = S (\n -> (n, n+1))

{-@ incr4' :: ST <{\xxxx v -> ((v>=0) && (xxxx>=0))}> Int Int @-}
-- NOTE(review): spec kept verbatim below; 'fresh' rebound through
-- 'bindST'/'returnST' carries the same non-negative state refinement.
incr4' :: ST Int Int
incr4' = fresh `bindST` returnST
| mightymoose/liquidhaskell | tests/pos/State1.hs | bsd-3-clause | 301 | 0 | 9 | 65 | 69 | 41 | 28 | 6 | 1 |
module T13591A where
import Second
one :: Int
one = _  -- NOTE(review): `_` is a typed hole — presumably intentional for this test module
| ezyang/ghc | testsuite/tests/ghci/scripts/T13591A.hs | bsd-3-clause | 54 | 0 | 4 | 11 | 17 | 11 | 6 | 4 | 1 |
-- !!! Dynamic library regression tests
module Main(main) where
import Data.Dynamic
-- | Run the two test groups in order: the toDyn value zoo, then the
-- fromDyn conversion checks.
main :: IO ()
main = test "toDyn" toDyn_list >> testIO "fromDyn" fromDyn_test
-- | A zoo of values wrapped with 'toDyn': primitives, IO actions, this
-- list itself, containers, tuples up to quintuples, and functions.
toDyn_list :: [Dynamic]
toDyn_list =
  [ toDyn (1::Int)
  , toDyn ('a')
  , toDyn False
  , toDyn ((-1.0)::Float)
  , toDyn (0.0::Double)
  , toDyn (1394::Integer)
  , toDyn (print "hello")
  , toDyn toDyn_list          -- recursive: the list contains itself
  , toDyn ([]::[Int])
  , toDyn (Nothing  :: Maybe Int)
  , toDyn ((Just 2) :: Maybe Int)
  , toDyn ((Just 2) :: Maybe Int)
  , toDyn ((Left 3)  :: Either Int Bool)
  , toDyn ((Right 3) :: Either Char Int)
  , toDyn ()
  , toDyn LT
  , toDyn ((),2::Int)
  , toDyn ((),2::Int,'a')
  , toDyn ((),2::Int,'a',1.0::Double)
  , toDyn ((),2::Int,'a',1.0::Double,Nothing::Maybe Bool)
  , toDyn ((+) :: Int -> Int -> Int)
  , toDyn ((+) :: Integer -> Integer -> Integer)
  , toDyn ((++) :: [Char] -> [Char] -> [Char])
  ]
-- Testing the conversion from Dynamic values:
-- | Exercise 'fromDyn': each call yields the wrapped value when the
-- requested type matches, and the supplied default otherwise.
fromDyn_test :: IO ()
fromDyn_test = do
  print (fromDyn (toDyn (1::Int)) (0::Int))
  print (fromDyn (toDyn ('a'::Char)) (0::Int))       -- mismatch: default
  print (fromDyn (toDyn 'a') 'b')
  print (fromDyn (toDyn (1::Float)) (0::Float))
  print (fromDyn (toDyn (2::Float)) (0::Int))        -- mismatch: default
  print (fromDyn (toDyn (3::Double)) (0::Double))
  print (fromDyn (toDyn (4::Double)) (0::Int))       -- mismatch: default
  print (fromDyn (toDyn (5::Integer)) (0::Integer))
  print (fromDyn (toDyn (6::Integer)) False)         -- mismatch: default
  print (fromDyn (toDyn [1,3,5::Integer]) ([]::[Integer]))
  print (fromDyn (toDyn (Just True)) (Nothing::Maybe Bool))
  print (fromDyn (toDyn (Left True::Either Bool Bool)) (Right False :: Either Bool Bool))
  print (fromDyn (toDyn LT) GT)
  print (fromDyn (toDyn ((+1)::Int->Int)) False)     -- mismatch: default
  print ((fromDyn (toDyn ((+1)::Int->Int)) ((+2)::Int->Int)) 3)
  print ((fromDyn (toDyn ((++)::[Int]->[Int]->[Int])) ((undefined)::[Int]->[Int]->[Int])) [1] [2])
-- Misc test utilities:
-- | Print a banner for the named test group, then the list of values in
-- diff-friendly form (one element per line).
test :: Show a => String -> [a] -> IO ()
test name items =
  putStrLn ("*** Testing: " ++ name ++ " ***") >> putStrLn (showListLn items)
-- | Print a banner for the named test group, then run the given action.
testIO :: String -> IO () -> IO ()
testIO name action =
  putStrLn ("*** Testing: " ++ name ++ " ***") >> action
-- showListLn presents a list in a diff-friendly format.
-- showListLn [a1,..an]
-- =>
-- [ a1
-- , a2
-- ..
-- , an
-- ]
--
-- | Render a list in a diff-friendly layout: "[ a1\n, a2\n...\n]".
-- The empty list renders as the empty string.
showListLn :: Show a => [a] -> String
showListLn []       = ""
showListLn (x : xs) =
  "[ " ++ show x ++ concatMap (\y -> "\n, " ++ show y) xs ++ "\n]"
{-
test8 = toDyn (mkAppTy listTc)
test9 :: Float
test9 = fromDyn test8 0
printf :: String -> [Dynamic] -> IO ()
printf str args = putStr (decode str args)
where
decode [] [] = []
decode ('%':'n':cs) (d:ds) =
(\ v -> show v++decode cs ds) (fromDyn d (0::Int))
decode ('%':'c':cs) (d:ds) =
(\ v -> show v++decode cs ds) (fromDyn d ('\0'))
decode ('%':'b':cs) (d:ds) =
(\ v -> show v++decode cs ds) (fromDyn d (False::Bool))
decode (x:xs) ds = x:decode xs ds
test10 :: IO ()
test10 = printf "%n = %c, that much is %b\n" [toDyn (3::Int),toDyn 'a', toDyn False]
-}
| seereason/ghcjs | test/pkg/base/dynamic001.hs | mit | 3,104 | 2 | 17 | 720 | 1,286 | 692 | 594 | 62 | 2 |
import Data.Complex
-- | Compute the DFT of the real-valued ramp signal 1..100 and print it.
main :: IO ()
main = print (dft (enumerate [x :+ 0 | x <- [1 .. 100]]))
-- | Pair every element with its index, counting from 0.
enumerate :: Integral b => [a] -> [(a, b)]
enumerate = flip zip [0 ..]
-- | Naive O(n^2) discrete Fourier transform of an index-tagged signal:
-- one 'singleDFT' evaluation per output bin.
dft :: (RealFloat a, Integral b) => [(Complex a, b)] -> [Complex a]
dft xs = [ singleDFT k xs | (_, k) <- xs ]
-- | The k-th DFT bin: the sum of every sample multiplied by its twiddle
-- factor.
singleDFT :: (RealFloat a, Integral b) => b -> [(Complex a, b)] -> Complex a
singleDFT k xs = sum [ e * fCoeff k n len | (e, n) <- xs ]
  where
    -- BUG FIX: 'length' returns Int, which the original passed straight to
    -- 'fCoeff', unifying the rigid type variable b with Int — a type error.
    -- Convert via 'fromIntegral' so the signature's generic b typechecks.
    len = fromIntegral (length xs)
-- | Twiddle factor e^(-2*pi*i*k*n/N) for bin @k@, sample @n@, of an
-- @n_tot@-point DFT.  'cis' t is cos t :+ sin t, i.e. exp (0 :+ t).
fCoeff :: (RealFloat a, Integral b) => b -> b -> b -> Complex a
fCoeff k n n_tot =
  cis (-2.0 * pi * fromIntegral (k * n) / fromIntegral n_tot)
| fredmorcos/attic | snippets/haskell/DFT.hs | isc | 681 | 2 | 14 | 212 | 382 | 201 | 181 | 13 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module Pretty where
import qualified Pact as P
import qualified Data.Maybe as M
import qualified Data.ByteString.Lazy.Char8 as BLC
import Text.PrettyPrint.ANSI.Leijen
import Data.Aeson.Encode.Pretty (encodePretty)
-- Filesystem path to the pact contract file, used only for display.
type Path = String

-- | Render the opening banner: contract location plus the consumer and
-- provider service names.
contractStart :: Path -> P.ContractDescription -> Doc
contractStart path contract =
  vsep
    [ text ("using contract at: " ++ path)
    , text ("consumer: " ++ P.serviceName (P.contractConsumer contract))
    , text ("provider: " ++ P.serviceName (P.contractProvider contract))
    ]
-- | Render the header for one interaction: its description and, when
-- present, the provider state it assumes ("N/A" otherwise).
verifyStart :: P.Interaction -> Doc
verifyStart interaction =
  hang 4 $
    (text "-" <+> underline (text (P.interactionDescription interaction))) </>
    (text "with state:" <+> text (M.fromMaybe "N/A" (P.interactionState interaction)))
-- | Render headers one per line, with values aligned one column past the
-- longest header name.
headers :: P.Headers -> Doc
headers (P.Headers hs) =
  vcat (map (\(k, v) -> (fillBreak (maxHeaderLength + 1) (string k <> ":") <+> string v)) hs)
  where
    -- NOTE(review): 'maximum' is partial, but when hs is empty the map
    -- above produces nothing, so laziness means it is never forced.
    maxHeaderLength =
      maximum (map (\(k, _) -> length k) hs)
-- | Render one validation error as an expected/actual pair, with the
-- expected side in green and the actual side in red.
validationError :: P.ValidationError -> Doc
validationError err =
  case err of
    P.MethodValidationError left right ->
      vcat
        [ hang 2 (text "expected method:" <$$> green (string left))
        , hang 2 (text "actual method:" <$$> red (string right))
        ]
    P.PathValidationError left right ->
      vcat
        [ hang 2 (text "expected path:" <$$> green (string left))
        , hang 2 (text "actual path:" <$$> red (string right))
        ]
    P.QueryValidationError left right ->
      vcat
        [ hang 2 (text "expected query:" <$$> green (string (show left)))
        , hang 2 (text "actual query:" <$$> red (string (show right)))
        ]
    P.StatusValidationError left right ->
      vcat
        [ hang 2 (text "expected status code:" <$$> green (int left))
        , hang 2 (text "actual status code:" <$$> red (int right))
        ]
    P.HeaderValidationError left right ->
      vcat
        [ hang 2 (text "expected headers:" <$$> green (headers left))
        , hang 2 (text "actual headers:" <$$> red (headers right))
        ]
    P.BodyValidationError left right ->
      -- Bodies are pretty-printed as JSON for readable diffs.
      vcat
        [ hang 2 (text "expected body:" <$$> green (string (BLC.unpack (encodePretty left))))
        , hang 2 (text "actual body:" <$$> red (string (BLC.unpack (encodePretty right))))
        ]
-- | Render every validation error, each followed by a separating newline.
validationErrors :: [P.ValidationError] -> Doc
validationErrors = vcat . map ((<> line) . validationError)
| mannersio/manners | cli/src/Pretty.hs | mit | 2,474 | 0 | 19 | 588 | 885 | 448 | 437 | 54 | 6 |
{-# LANGUAGE OverloadedStrings #-}
module MyAntigen where
import Antigen (
-- Rudimentary imports
AntigenConfig (..)
, defaultConfig
, bundle
, antigen
-- If you want to source a bit trickier plugins
, ZshPlugin (..)
, antigenSourcingStrategy
, filePathsSourcingStrategy
)
-- Plugins to install and source, in order.  oh-my-zsh is only partially
-- sourced: just the plugin directories listed in 'sourcingLocations'.
bundles =
  [ bundle "Tarrasch/zsh-functional"
  , bundle "Tarrasch/zsh-bd"
  , bundle "zsh-users/zsh-syntax-highlighting"
  , bundle "zsh-users/zsh-history-substring-search"
  , bundle "nojhan/liquidprompt"
  , (bundle "robbyrussell/oh-my-zsh")
      { sourcingLocations = [ "plugins/common-aliases"
                            , "plugins/git"
                            , "plugins/git-extras"
                            , "plugins/rsync" ] }
  ]

-- Default antigen configuration with our plugin list swapped in.
config = defaultConfig { plugins = bundles }

main :: IO ()
main = antigen config
module Queens (queens) where
import Control.Monad (zipWithM_)
import FD
-- | Solve the n-queens problem: one finite-domain variable per column
-- holding the queen's row (1..n), all rows distinct, no shared diagonals.
queens n = runFD $ do
  vars <- news n (1, n)
  allDifferent vars
  diagonals vars
  labelling vars
-- | Constrain every pair of queens to avoid shared diagonals: each queen
-- is checked against all later queens, with their column distance.
diagonals :: [FDExpr] -> FDConstraint
diagonals []       = return ()
diagonals (q : qs) = zipWithM_ (diag q) qs [1 ..] >> diagonals qs
-- | Queens @x@ and @y@, @n@ columns apart, must not share a diagonal:
-- @y@ may equal neither @x + n@ nor @x - n@.
diag :: FDExpr -> FDExpr -> FDExpr -> FDConstraint
diag x y n = do
  y #\= x + n
  y #\= x - n
| dmoverton/finite-domain | src/Queens.hs | mit | 421 | 0 | 10 | 113 | 188 | 93 | 95 | 17 | 1 |
{-# LANGUAGE OverloadedStrings #-}
{-|
Module : Travis
Description : A simple client implementation using Travis CI API.
Copyright : (c) Tomas Tauber, 2015
License : MIT
Maintainer : Tomas Tauber <tomtau@hku.hk>
Stability : experimental
A simple client implementation using Travis CI API: <http://docs.travis-ci.com/api/>.
-}
module Travis where
import Data.Aeson (decode)
import Network.HTTP.Conduit
import Travis.Types (BuildRaw (..), BuildsRaw (..),
RepoRaw (..), RepositoryRaw (..))
type AccountName = String
type RepositoryName = String

-- |Travis CI base API URL
travisAPIBaseURL = "https://api.travis-ci.org/repos/" :: String

-- |Travis CI API v2 headers (version selector plus a User-Agent,
-- sent with every request)
requestH = [("Accept", "application/vnd.travis-ci.2+json"),
            ("User-Agent", "HaskellHTTPConduit/2.1.8")]
-- |fetches information about public repository on Travis CI
-- may return a tuple of repository and its builds information
-- ('Nothing' components signal a JSON decoding failure)
fetchRepository :: AccountName -> RepositoryName -> IO (Maybe RepositoryRaw, Maybe BuildsRaw)
fetchRepository acname reponame = do
  -- One manager is shared by both requests.
  manager <- newManager conduitManagerSettings
  let coreUrl = travisAPIBaseURL ++ acname ++ "/" ++ reponame
      -- Issue a GET with the Travis v2 headers (deduplicates the
      -- previously copy-pasted request construction).
      get url = do
        req <- parseUrl url
        httpLbs (req { requestHeaders = requestH }) manager
  -- repository info
  repoResponse <- get coreUrl
  let repository = decode (responseBody repoResponse) :: Maybe RepoRaw
  -- build info
  buildsResponse <- get (coreUrl ++ "/builds")
  let repobuilds = decode (responseBody buildsResponse) :: Maybe BuildsRaw
  return (fmap repo repository, repobuilds)
| tomtau/travis | src/Travis.hs | mit | 1,749 | 0 | 12 | 360 | 331 | 178 | 153 | 25 | 1 |
{-# LANGUAGE RecordWildCards #-}
module Main where
import WangsAlgorithm.Prover
import WangsAlgorithm.LaTeX (latexProof)
import qualified WangsAlgorithm.Parser as P
import Options.Applicative
import Data.Semigroup ((<>))
-- | Output format for a found proof.
data Backend =
    Text
  | LaTeX
  deriving (Show, Eq, Read, Enum, Bounded)

-- Every backend, in declaration order; used for the --backend help text.
allBackends :: [Backend]
allBackends = enumFrom minBound

-- | Parsed command-line options.
data Input = Input
  { sequentStr :: String   -- raw sequent text, parsed later by 'run'
  , backend    :: Backend  -- chosen output format
  }
-- | optparse-applicative parser for the command-line options:
-- a required --sequent string and an optional --backend (default 'Text').
getInput :: Parser Input
getInput = Input
  <$> strOption
      ( long "sequent"
     <> short 's'
     <> metavar "SEQUENT"
     <> help "The propositional logic sequent to be proved" )
  <*> option auto
      ( long "backend"
     <> short 'b'
     <> value Text
     <> help ("Select one of " ++ show allBackends))
-- | Parse the sequent, attempt a proof, and print it with the selected
-- backend.  Parse failures and unprovable sequents abort via 'error'.
run :: Input -> IO ()
run Input{..} = case P.readSequent sequentStr of
  Left err      -> error $ "Cannot be parsed: " ++ show err
  Right sequent -> case prove sequent of
    Nothing -> error "No possible moves."
    Just pf -> case backend of
      Text  -> putStrLn $ showProof pf
      LaTeX -> putStrLn $ latexProof pf
main :: IO ()
main = run =<< execParser opts
  where
    -- Full option-parser description, shown by --help.
    opts = info (getInput <**> helper)
      ( fullDesc
     <> progDesc ("Enter your sequent in the following format: "
                  ++ "[a|b, a&c, ~b, c->d] |- [b,c]")
     <> header "A propositional theorem prover for LK using Wang's Algorithm")
| joom/WangsAlgorithm | execs/Main.hs | mit | 1,395 | 0 | 15 | 356 | 394 | 202 | 192 | 43 | 4 |
module Glucose.Parser.EOFOr where
-- | A value of type @a@, or the end-of-input marker 'EOF'.
data EOFOr a = EOF | NotEOF a
  deriving (Functor, Eq, Show)

-- instance Ord a => Ord (EOFOr a) where
--   compare EOF EOF = EQ
--   compare EOF (NotEOF _) = GT
--   compare (NotEOF _) EOF = LT
--   compare (NotEOF a) (NotEOF b) = compare a b

-- | Case analysis for 'EOFOr' (analogous to 'maybe'): return the default
-- on 'EOF', otherwise apply the function to the wrapped value.
maybeEOF :: b -> (a -> b) -> EOFOr a -> b
maybeEOF onEOF f e = case e of
  EOF      -> onEOF
  NotEOF a -> f a
| sardonicpresence/glucose | src/Glucose/Parser/EOFOr.hs | mit | 387 | 0 | 8 | 107 | 100 | 56 | 44 | 6 | 1 |
module Main where
import JavaFX
import TeaStormApp
main :: IO ()
main = javafx (Proxy :: Proxy TeaStormApp)  -- launch the app selected by the Proxy type
| filippovitale/eta-playground | javafx-teastorm/src/Main.hs | mit | 110 | 0 | 7 | 20 | 37 | 21 | 16 | 5 | 1 |
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE ScopedTypeVariables #-}
--------------------------------------------------------------------------------
-- |
-- Module : Network.MQTT.Broker
-- Copyright : (c) Lars Petersen 2016
-- License : MIT
--
-- Maintainer : info@lars-petersen.net
-- Stability : experimental
--------------------------------------------------------------------------------
module Network.MQTT.Broker
( Broker (brokerAuthenticator)
, Callbacks (..)
, newBroker
, publishUpstream
, publishDownstream
, withSession
, getUptime
, getSessions
, getSubscriptions
, lookupSession
, terminateExpiredSessions
, terminateExpiredSessionsAt
) where
import Control.Concurrent.InterruptibleLock
import Control.Concurrent.MVar
import Control.Exception
import Control.Monad (mapM_, void, when)
import Data.Int
import qualified Data.IntMap.Strict as IM
import qualified Data.IntSet as IS
import qualified Data.Map.Strict as M
import Data.Maybe
import System.Clock
import Network.MQTT.Broker.Authentication
import Network.MQTT.Broker.Internal
import qualified Network.MQTT.Broker.RetainedMessages as RM
import qualified Network.MQTT.Broker.Session as Session
import qualified Network.MQTT.Broker.Session.Statistic as Session
import Network.MQTT.Message
import qualified Network.MQTT.Trie as R
-- | Create a new broker from an authenticator source and a set of callbacks.
-- Records the creation time (second resolution) and starts with an empty
-- retained-message store and an empty session/subscription state.
newBroker :: IO auth -> Callbacks auth -> IO (Broker auth)
newBroker getAuthenticator cbs = do
  now <- sec <$> getTime Realtime
  rm <- RM.new
  st <-newMVar BrokerState
    { brokerMaxSessionIdentifier = SessionIdentifier 0
    , brokerSubscriptions = mempty
    , brokerSessions = mempty
    , brokerSessionsByPrincipals = mempty
    }
  pure Broker {
      brokerCreatedAt = now
    , brokerCallbacks = cbs
    , brokerAuthenticator = getAuthenticator
    , brokerRetainedStore = rm
    , brokerState = st
    }
-- | Authenticate a connection request and run the accept handler inside the
-- (possibly resumed) session, or invoke the reject handler with a reason.
-- Authentication exceptions map to 'ServerUnavailable'; an unresolvable
-- principal maps to 'NotAuthorized'. On handler exit the session is either
-- terminated (clean-session requests) or reset.
withSession :: forall auth. (Authenticator auth) => Broker auth -> ConnectionRequest -> (RejectReason -> IO ()) -> (Session auth -> SessionPresent -> IO ()) -> IO ()
withSession broker request sessionRejectHandler sessionAcceptHandler = do
  authenticator <- brokerAuthenticator broker
  (try $ authenticate authenticator request :: IO (Either (AuthenticationException auth) (Maybe PrincipalIdentifier))) >>= \case
    Left _ -> sessionRejectHandler ServerUnavailable
    Right mp -> case mp of
      Nothing -> sessionRejectHandler NotAuthorized
      Just principalIdentifier -> bracket
        -- In case the principals identity could be determined, we'll either
        -- find an associated existing session or create a new one.
        -- Getting/creating a session eventually modifies the broker state.
        ( getSession broker principalIdentifier (requestClientIdentifier request) )
        -- This is executed when the current thread terminates (on connection loss).
        -- Cleanup actions are executed here (like removing the session when the clean session flag was set).
        (\case
          Nothing -> pure ()
          Just (session, _, _) -> if requestCleanSession request
            then Session.terminate session
            else Session.reset session
        )
        -- This is where the actual connection handler code is invoked.
        -- We're using a `PrioritySemaphore` here. This allows other threads for
        -- this session to terminate the current one. This is usually the case
        -- when the client loses the connection and reconnects, but we have not
        -- yet noted the dead connection. The currently running handler thread
        -- will receive a `ThreadKilled` exception.
        (\case
          Nothing -> sessionRejectHandler NotAuthorized
          Just (session, sessionPresent, maxSessionsExceeded )-> do
            when maxSessionsExceeded $
              terminateOldestSessionOfPrincipal broker principalIdentifier
            acceptAndServeConnection session sessionPresent
        )
  where
    -- Runs the handler while holding the session lock so that only one
    -- connection serves a given session at a time.
    acceptAndServeConnection :: Session auth -> SessionPresent -> IO ()
    acceptAndServeConnection session sessionPresent =
      exclusively (sessionLock session) $
        let serve = onConnect' >> sessionAcceptHandler session sessionPresent >> onDisconnect' Nothing
        in serve `catch` (\e-> onDisconnect' $ Just $ show (e :: SomeException) )
      where
        -- Marks the session as connected; throws if it already is (invariant
        -- guaranteed by the session lock above).
        onConnect' :: IO ()
        onConnect' = do
          now <- sec <$> getTime Realtime
          modifyMVar_ (sessionConnectionState session) $ \case
            Connected {} ->
              throwIO $ AssertionFailed "Session shouldn't be marked as connected here."
            Disconnected {} ->
              pure Connected {
                connectedAt = now
              , connectedCleanSession = requestCleanSession request
              , connectedSecure = requestSecure request
              , connectedWebSocket = isJust (requestHttp request)
              , connectedRemoteAddress = requestRemoteAddress request
              , connectedWill = requestWill request
              }
        -- Marks the session as disconnected, computing its expiry from the
        -- principal's idle-session TTL. The will message is published only
        -- for abnormal disconnects (reason present).
        onDisconnect' :: Maybe String -> IO ()
        onDisconnect' reason = do
          now <- sec <$> getTime Realtime
          ttl <- quotaMaxIdleSessionTTL . principalQuota <$> Session.getPrincipal session
          case requestWill request of
            Nothing -> pure ()
            Just msg
              | isJust reason -> Session.publish session msg
              | otherwise -> pure ()
          modifyMVar_ (sessionConnectionState session) $ const $
            pure Disconnected {
              disconnectedAt = now
            , disconnectedSessionExpiresAt = now + fromIntegral ttl
            , disconnectedWith = reason
            }
-- | Look up a session by identifier in the broker's session table.
lookupSession :: SessionIdentifier -> Broker auth -> IO (Maybe (Session auth))
lookupSession (SessionIdentifier sid) broker =
  withMVar (brokerState broker) $ \st->
    pure $ IM.lookup sid (brokerSessions st)

-- | Terminate all disconnected sessions that have expired by now.
terminateExpiredSessions :: Broker auth -> IO ()
terminateExpiredSessions broker = do
  now <- sec <$> getTime Realtime
  terminateExpiredSessionsAt broker now

-- | Terminate all disconnected sessions whose expiry lies at or before the
-- given timestamp (seconds); connected sessions are never touched.
terminateExpiredSessionsAt :: Broker auth -> Int64 -> IO ()
terminateExpiredSessionsAt broker timestamp =
  getSessions broker >>= mapM_ terminateIfExpired
  where
    terminateIfExpired session =
      Session.getConnectionState session >>= \case
        Connected {} -> pure ()
        Disconnected { disconnectedSessionExpiresAt = expiration } ->
          when ( timestamp >= expiration ) $ Session.terminate session
-- | Either lookup or create a session if none is present (yet).
--
-- Principal is only looked up initially. Reconnects won't update the
-- permissions etc. Returns Nothing in case the principal identifier cannot
-- be mapped to a principal object.
getSession :: Authenticator auth => Broker auth -> PrincipalIdentifier -> ClientIdentifier -> IO (Maybe (Session auth, SessionPresent, Bool))
getSession broker pid cid = do
  authenticator <- brokerAuthenticator broker
  -- Resuming an existing session..
  -- Re-fetch the principal and its permissions.
  getPrincipal authenticator pid >>= \case
    Nothing -> pure Nothing
    Just principal ->
      modifyMVar (brokerState broker) $ \st->
        case M.lookup pid (brokerSessionsByPrincipals st) of
          -- No session entry found for principal.
          -- Creating a new one.
          Nothing -> do
            (st', session) <- createSession principal st
            -- Session limit cannot be exceeded with 1 session.
            pure (st', Just (session, SessionPresent False, False))
          -- At least one session exists for this principal.
          -- Find the correct one or create a new one.
          Just cim -> case M.lookup cid cim of
            Nothing -> do
              (st', session) <- createSession principal st
              -- Third tuple element flags that the principal's session quota
              -- is now exceeded (caller terminates the oldest session).
              pure $ (st', Just (session, SessionPresent False, M.size cim >= quotaMaxSessions (principalQuota principal)))
            Just (SessionIdentifier sid) ->
              case IM.lookup sid (brokerSessions st) of
                -- NOTE(review): "orhphaned" below is a typo in the error
                -- message ("orphaned"); left unchanged here.
                Nothing -> throwIO $ AssertionFailed $
                  "Encountered orhphaned session id " ++ show sid ++
                  " for principal " ++ show pid ++" (" ++ show cid ++ ")."
                Just session -> do
                  void $ swapMVar (sessionPrincipal session) principal
                  -- Session limit cannot be exceeded when continuing an existing one.
                  pure (st, Just (session, SessionPresent True, False))
  where
    -- Allocates a fresh session (next sequential identifier) and threads it
    -- into both broker indexes; must run inside modifyMVar on brokerState.
    createSession principal st = do
      now <- sec <$> getTime Realtime
      lock <- newInterruptibleLock
      subscriptions <- newMVar R.empty
      queue <- newMVar (emptyServerQueue $ fromIntegral $ quotaMaxPacketIdentifiers $ principalQuota principal)
      queuePending <- newEmptyMVar
      mconnection <- newMVar $ Disconnected 0 0 mempty
      mprincipal <- newMVar principal
      stats <- Session.newStatistic
      let SessionIdentifier maxSessionIdentifier = brokerMaxSessionIdentifier st
          newSessionIdentifier = maxSessionIdentifier + 1
          newSession = Session
            { sessionBroker = broker
            , sessionIdentifier = SessionIdentifier newSessionIdentifier
            , sessionClientIdentifier = cid
            , sessionPrincipalIdentifier = pid
            , sessionCreatedAt = now
            , sessionConnectionState = mconnection
            , sessionPrincipal = mprincipal
            , sessionLock = lock
            , sessionSubscriptions = subscriptions
            , sessionQueue = queue
            , sessionQueuePending = queuePending
            , sessionStatistic = stats
            }
          newBrokerState = st
            { brokerMaxSessionIdentifier = SessionIdentifier newSessionIdentifier
            , brokerSessions = IM.insert newSessionIdentifier newSession (brokerSessions st)
            , brokerSessionsByPrincipals = flip (M.insert pid) (brokerSessionsByPrincipals st) $! case M.lookup pid (brokerSessionsByPrincipals st) of
                Nothing -> M.singleton cid (SessionIdentifier newSessionIdentifier)
                Just cim -> M.insert cid (SessionIdentifier newSessionIdentifier) cim
            }
      pure (newBrokerState, newSession)
-- | Broker uptime in seconds, relative to 'brokerCreatedAt'.
getUptime :: Broker auth -> IO Int64
getUptime broker = do
  now <- sec <$> getTime Realtime
  pure $ now - brokerCreatedAt broker

-- | Snapshot of all sessions, keyed by numeric session identifier.
getSessions :: Broker auth -> IO (IM.IntMap (Session auth))
getSessions broker = brokerSessions <$> readMVar (brokerState broker)

-- | Snapshot of the global subscription trie (values are session-id sets).
getSubscriptions :: Broker auth -> IO (R.Trie IS.IntSet)
getSubscriptions broker = brokerSubscriptions <$> readMVar (brokerState broker)
| lpeterse/haskell-mqtt | src/Network/MQTT/Broker.hs | mit | 11,336 | 2 | 29 | 3,426 | 2,255 | 1,161 | 1,094 | 181 | 8 |
-- ghci
-- :load C:\Users\Thomas\Documents\GitHub\haskell.practice\PE\Problem0019.hs
-- :r
-- :set +s
module Problem19 where
import Data.Time.Calendar
import Data.Time.Calendar.WeekDate
--1 monday - 7 sunday http://www.haskell.org/ghc/docs/6.12.3/html/libraries/time-1.1.4/Data-Time-Calendar-WeekDate.html
-- First element of result is year, second week number (1-53), third day of week (1 for Monday to 7 for Sunday) for toWeekDate
-- | True when the given date falls on a Sunday (ISO day-of-week 7).
isSunday date = dayOfWeek == 7
    where
        (_, _, dayOfWeek) = toWeekDate date

-- | The first day of every month in each of the given years.
createListOf1stDaysOfMonthFor years =
    concatMap (\year -> map (\month -> fromGregorian year month 1) [1 .. 12]) years

-- | How many months in the given years begin on a Sunday.
countOfFirstSundaysThatAreTheFirstDayOfTheMonthFor =
    length . filter isSunday . createListOf1stDaysOfMonthFor

-- | Project Euler 19: first-of-month Sundays in the twentieth century.
answer = countOfFirstSundaysThatAreTheFirstDayOfTheMonthFor [1901..2000]
--tests
-- Hand-rolled regression checks; each evaluates to True when all cases pass.
countOfFirstSundaysThatAreTheFirstDayOfTheMonthForTests = and
    [
        countOfFirstSundaysThatAreTheFirstDayOfTheMonthFor [2014] == 1,
        countOfFirstSundaysThatAreTheFirstDayOfTheMonthFor [2015] == 3,
        countOfFirstSundaysThatAreTheFirstDayOfTheMonthFor [2014..2015] == 4
    ]

createListOf1stDaysOfMonthForTests = and
    [
        length (createListOf1stDaysOfMonthFor [2014]) == 12,
        isSunday ((createListOf1stDaysOfMonthFor [2014]) !! 5) == True,
        isSunday ((createListOf1stDaysOfMonthFor [2014]) !! 6) == False
    ]

isSundayTests = and
    [
        isSunday (fromGregorian 2014 11 1) == False,
        isSunday (fromGregorian 2015 2 1) == True,
        isSunday (fromGregorian 2014 9 1) == False
    ]

-- Top-level test aggregate: True iff every suite above passes.
tests = and
    [
        isSundayTests,
        createListOf1stDaysOfMonthForTests,
        countOfFirstSundaysThatAreTheFirstDayOfTheMonthForTests
    ]
import Data.Hash.MD5
-- | Advent of Code 2016, day 5 (part 1): print the 8-character password
-- derived from MD5 hashes of the puzzle input.
main = do
  let input = "ojvtpuvg"
      result = compute input
  print result

-- | The password is the first eight characters of the (lazy, infinite)
-- character stream produced by 'findPass'.
compute :: String -> String
compute input = take 8 $ findPass input 0

-- | Infinite stream of password characters: each step searches from the
-- current index and resumes one past the index that produced a hit.
findPass :: String -> Int -> String
findPass input index1 = c : findPass input (index2 + 1)
  where (c, index2) = computeHash input index1

-- | Search forward from @index@ for the first MD5 of @input ++ index@ that
-- starts with five zeros; return its sixth hex digit and the matching index.
-- (Uses the third-party Data.Hash.MD5 package.)
computeHash :: String -> Int -> (Char, Int)
computeHash input index
  | take 5 hash == "00000" = (hash !! 5, index)
  | otherwise = computeHash input (index + 1)
  where hash = md5s $ Str $ input ++ show index
import ChunkedMain (chunkedMain)
import FrequencyParMap (frequencyChunked)
-- | Run the chunked word-frequency pipeline with the parMap-based strategy.
main :: IO ()
main = chunkedMain frequencyChunked
| apauley/parallel-frequency | src/freq-parmap.hs | mit | 126 | 0 | 6 | 15 | 35 | 19 | 16 | 4 | 1 |
{-# LANGUAGE OverloadedStrings, TemplateHaskell #-}
-- | Alert API.
module Web.Mackerel.Api.Alert (listAlerts, closeAlert) where
import Data.Aeson.TH (deriveJSON)
import qualified Data.ByteString.Char8 as BS
import qualified Data.HashMap.Lazy as HM
import Network.HTTP.Types (StdMethod(..))
import Web.Mackerel.Client
import Web.Mackerel.Internal.Api
import Web.Mackerel.Internal.TH
import Web.Mackerel.Types.Alert
-- | Wire format of the list-alerts response body ({"alerts": [...]}).
data ListAlertsResponse = ListAlertsResponse { responseAlerts :: [Alert] }
$(deriveJSON options ''ListAlertsResponse)

-- | GET /api/v0/alerts — fetch the currently open alerts.
listAlerts :: Client -> IO (Either ApiError [Alert])
listAlerts client
  = request client GET "/api/v0/alerts" [] emptyBody (createHandler responseAlerts)

-- | POST /api/v0/alerts/{id}/close — close an alert, providing a reason.
closeAlert :: Client -> AlertId -> String -> IO (Either ApiError Alert)
closeAlert client (AlertId alertId') reason = do
  let body = Just $ HM.fromList [("reason", reason) :: (String, String)]
  request client POST ("/api/v0/alerts/" <> BS.pack alertId' <> "/close") [] body (createHandler id)
| itchyny/mackerel-client-hs | src/Web/Mackerel/Api/Alert.hs | mit | 988 | 0 | 13 | 123 | 295 | 168 | 127 | 19 | 1 |
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE MultiParamTypeClasses #-}
-------------------------------------------------------------------------------
-- Module : Domain.Concrete.Transformers.System
-- Copyright : (c) 2017 Marcelo Sousa
--
-- Transformers for the concrete semantics.
-- Transformer for the system
-------------------------------------------------------------------------------
module Domain.Concrete.Transformers.System where
import Control.Monad.State.Lazy hiding (join)
import Data.List
import Data.Map (Map)
import Data.Maybe
import Domain.Action
import Domain.Concrete.API (set_pos)
import Domain.Concrete.State
import Domain.Concrete.Transformers.State
import Domain.Concrete.Transformers.Declaration
import Domain.Concrete.Value
import Domain.Lattice
import Domain.MemAddr
import Domain.Util
import Language.C.Syntax.Constants
import Language.C.Syntax.Ops
import Language.SimpleC.AST hiding (Value)
import Language.SimpleC.Converter hiding (Scope(..))
import Language.SimpleC.Flow
import Language.SimpleC.Util
import Util.Generic hiding (safeLookup)
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Model.GCS as GCS
-- | converts the front end into a system
convert :: FrontEnd () (ConState,ConAct) -> GCS.System ConState ConAct
convert fe =
  -- Locate main's entry position/symbol, run the declaration transformers
  -- from an empty concrete state, then set the program position to main.
  -- RecordWildCards on ConTState brings `st` into scope from the pattern.
  let (pos_main,sym_main) = get_entry "main" (cfgs fe) (symt fe)
      init_tstate = ConTState Global empty_cstate (symt fe) (cfgs fe) False False 0 S.empty
      (acts,s@ConTState{..}) = runState (transformer_decls $ decls $ ast fe) init_tstate
      st' = set_pos st (symId sym_main) sym_main pos_main
  in mytrace False ("convert: initial state = " ++ show st) $ GCS.System st' acts (cfgs fe) (symt fe) [GCS.main_tid] 1

-- | processes a list of declarations
transformer_decls :: [SDeclaration] -> ConTOp ConAct
transformer_decls = mytrace False ("transformer_decls!!!!") $
  -- Fold the per-declaration actions together with the lattice join.
  foldM (\a d -> transformer_decl d >>= \a' -> return $ a `join` a') bot
| marcelosousa/poet | src/Domain/Concrete/Transformers/System.hs | gpl-2.0 | 1,962 | 0 | 12 | 258 | 488 | 285 | 203 | 36 | 1 |
module Interface (startInterface) where
import Interface.Torrent.Info
import Interface.Torrent.List
import Interface.Torrent.Behavior
import Interface.Torrent.Handler
import Interface.Completed
import Interface.Peer
import HTorrentPrelude
import MetaInfo
import Torrent.Env
import qualified Graphics.UI.Threepenny as UI
import Graphics.UI.Threepenny.Core
import Reactive.Threepenny
-- | Start the torrent UI: build the torrent behaviour and handler
-- environment, run the handler loop on a background thread, and serve the
-- threepenny GUI on port 10000 (static assets from the "static" directory).
startInterface :: TorrentEnv -> IO ()
startInterface env = do
    (torrentBehavior, torrentHandlerEnv) <- runReaderT torrentBehavior env
    -- The handler runs independently of the GUI; its ThreadId is unused,
    -- so discard it explicitly (was previously an implicit discard, and an
    -- unused binding `let i = env` has been removed).
    void (forkIO (runReaderT runTorrentHandlerInit torrentHandlerEnv))
    startGUI config (interface torrentBehavior)
    where config = defaultConfig {tpPort = 10000, tpStatic = Just "static"}
-- | Build the browser page: style sheet, window title, the torrent list
-- table, and the detail pane driven by the currently focused torrent.
interface :: TorrentBehavior -> Window -> UI ()
interface b w = do
    UI.addStyleSheet w "hTorrent.css"
    UI.set UI.title "hTorrent" (return w)
    (torrentTable, torrentFocusB) <- torrentList [b]
    torrentFocus <- torrentInfoFocus torrentFocusB
    let body = UI.set style bodyStyle (getBody w)
    void (body #+ layout torrentTable torrentFocus)
    where bodyStyle = [ ("padding", "0"),
                        ("margin", "0"),
                        ("height", "100%"),
                        ("min-height", "100%") ]
-- | Arrange the page: the torrent table in the main area and the torrent
-- info pane inside a fixed footer ('footerStyle').
layout :: Element -> Element -> [UI Element]
layout torrentTable torrentInfo = [body, footer]
    where bodyStyle = []
          bodyDiv = UI.set style bodyStyle UI.div
          body = bodyDiv #+ [UI.element torrentTable]
          footerDiv = UI.set style footerStyle UI.div
          footer = footerDiv #+ [UI.element torrentInfo]
-- | CSS attributes pinning the footer pane to the bottom of the viewport
-- at 40% height and full width.
footerStyle :: [(String, String)]
footerStyle =
    [ ("position", "fixed")
    , ("left",     "0")
    , ("bottom",   "0")
    , ("height",   "40%")
    , ("width",    "100%")
    ]
| ian-mi/hTorrent | Interface.hs | gpl-3.0 | 1,766 | 0 | 12 | 387 | 522 | 288 | 234 | 46 | 1 |
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE OverloadedStrings #-}
module Language.UHIM.Japanese.Adjective where
import qualified Data.Text as T
import qualified Data.Map as M
import Data.Map (Map)
import Data.Aeson.Types hiding (parse)
import Language.UHIM.Japanese.Prim (JaYomi(..))
-- | Japanese adjective conjugation classes: modern i/shii/jii and na
-- adjectives alongside their classical ku/shiku/jiku/nari/tari counterparts.
data JaAdjConjugation = JaAdjI | JaAdjKu
                      | JaAdjSii | JaAdjSiku
                      | JaAdjZii | JaAdjZiku
                      | JaAdjNa | JaAdjNari
                      | JaAdjTari
  deriving (Eq, Ord, Show, Read, Enum, Bounded)
-- | Whether the conjugation class belongs to classical (pre-modern)
-- Japanese: the ku, shiku, jiku, nari and tari classes.
isClassicalAdjConjugation :: JaAdjConjugation -> Bool
isClassicalAdjConjugation c =
  c `elem` [JaAdjKu, JaAdjSiku, JaAdjZiku, JaAdjNari, JaAdjTari]
-- | Lookup table from katakana class labels to conjugation classes;
-- the inverse of 'toSymbol'.
jaAdjClasses :: Map String JaAdjConjugation
jaAdjClasses = M.fromList ls
  where
    ls = [ ("イ", JaAdjI)
         , ("ク", JaAdjKu)
         , ("シイ", JaAdjSii)
         , ("シク", JaAdjSiku)
         , ("ジイ", JaAdjZii)
         , ("ジク", JaAdjZiku)
         , ("ナ", JaAdjNa)
         , ("ナリ", JaAdjNari)
         , ("タリ", JaAdjTari)
         ]

-- | Parse a katakana class label; 'Nothing' for unknown labels.
parse :: String -> Maybe JaAdjConjugation
parse = flip M.lookup jaAdjClasses
-- | Katakana label of a conjugation class (total; inverse of 'parse').
toSymbol :: JaAdjConjugation -> String
toSymbol JaAdjI = "イ"
toSymbol JaAdjKu = "ク"
toSymbol JaAdjSii = "シイ"
toSymbol JaAdjSiku = "シク"
toSymbol JaAdjZii = "ジイ"
toSymbol JaAdjZiku = "ジク"
toSymbol JaAdjNa = "ナ"
toSymbol JaAdjNari = "ナリ"
toSymbol JaAdjTari = "タリ"

-- | Hiragana ending used for the dictionary form of each class; the na-,
-- nari- and tari-classes currently yield the empty reading.
conjDictForm :: JaAdjConjugation -> JaYomi
conjDictForm JaAdjI = NonChange "い"
conjDictForm JaAdjKu = NonChange "し"
conjDictForm JaAdjSii = NonChange "しい"
conjDictForm JaAdjSiku = NonChange "し"
conjDictForm JaAdjZii = NonChange "じい"
conjDictForm JaAdjZiku = NonChange "じ"
-- ToDo: Rethink about endings of those adjective classes
conjDictForm JaAdjNa = mempty
conjDictForm JaAdjNari = mempty
conjDictForm JaAdjTari = mempty
-- | Serialise as the katakana class label (see 'toSymbol').
instance ToJSON JaAdjConjugation where
  toJSON jvc = toJSON $ toSymbol jvc

-- | Accept any label understood by 'parse'; fail with a message otherwise.
instance FromJSON JaAdjConjugation where
  parseJSON (String v) = maybe (fail . T.unpack $ "Unknown Japanese adjective class:" `mappend` v) pure . parse $ T.unpack v
  -- Fixed: the expected-type label previously said "JaYomi", which is a
  -- different type; report the type actually being parsed.
  parseJSON v = typeMismatch "JaAdjConjugation" v
| na4zagin3/uhim-dict | src/Language/UHIM/Japanese/Adjective.hs | gpl-3.0 | 2,340 | 0 | 13 | 493 | 587 | 324 | 263 | 59 | 1 |
module HEP.Automation.MadGraph.Dataset.Set20110714set2 where
import HEP.Storage.WebDAV.Type
import HEP.Automation.MadGraph.Model
import HEP.Automation.MadGraph.Machine
import HEP.Automation.MadGraph.UserCut
import HEP.Automation.MadGraph.SetupType
import HEP.Automation.MadGraph.Model.Trip
import HEP.Automation.MadGraph.Dataset.Processes
import HEP.Automation.JobQueue.JobType
-- | MadGraph process configuration: ttbar + 0/1 jets for the Trip model.
processSetup :: ProcessSetup Trip
processSetup = PS {
    model = Trip
  , process = preDefProcess TTBar0or1J
  , processBrief = "TTBar0or1J"
  , workname = "714_Trip_TTBar0or1J_TEV"
  }
-- | Scan grid over (massTrip, gRTrip): couplings in steps of 0.05 for the
-- 400, 600 and 800 mass points.
paramSet :: [ModelParam Trip]
paramSet = [ TripParam { massTrip = m, gRTrip = g }
           | (m,g) <- (map (\x->(400,x)) [1.5,1.55..3.50] )
                      ++ (map (\x->(600,x)) [2.5,2.55..4.50] )
                      ++ (map (\x->(800,x)) [3.5,3.55..5.50] ) ]
{- | (m,g) <- (map (\x->(400,x)) [3.5,3.55..4.50] )
++ (map (\x->(600,x)) [4.5,4.55..5.50] )
++ (map (\x->(800,x)) [5.5,5.55..6.50] ) ] -}
-- [ (200,0.5), (200,1.0)
-- , (400,0.5), (400,1.0), (400,1.5), (400,2.0)
-- , (600,0.5), (600,1.0), (600,1.5), (600,2.0), (600,2.5)
-- , (800,0.5), (800,1.0), (800,1.5), (800,2.0), (800,2.5), (800,3.0), (800,3.5)
-- , (1000,0.5), (1000,1.0), (1000,1.5), (1000,2.0), (1000,2.5), (1000,3.0), (1000,3.5), (1000,4.0) ] ]
-- | Run-set indices; a single run per parameter point.
sets :: [Int]
sets = [1]

-- | Analysis-level user cuts (missing ET, lepton/jet eta and ET thresholds).
ucut :: UserCut
ucut = UserCut {
    uc_metcut = 15.0
  , uc_etacutlep = 2.7
  , uc_etcutlep = 18.0
  , uc_etacutjet = 2.7
  , uc_etcutjet = 15.0
  }

-- | One event-generation job per (parameter point, run-set index):
-- 100k events at the Tevatron, fixed scale 200 GeV, MLM matching,
-- PYTHIA + PGS with a 0.4 cone jet algorithm.
eventsets :: [EventSet]
eventsets =
  [ EventSet processSetup
      (RS { param = p
          , numevent = 100000
          , machine = TeVatron
          , rgrun = Fixed
          , rgscale = 200.0
          , match = MLM
          , cut = DefCut
          , pythia = RunPYTHIA
          , usercut = UserCutDef ucut -- NoUserCutDef --
          , pgs = RunPGS
          , jetalgo = Cone 0.4
          , uploadhep = NoUploadHEP
          , setnum = num
          })
  | p <- paramSet , num <- sets ]

-- | WebDAV destination directory for the produced data.
webdavdir :: WebDAVRemoteDir
webdavdir = WebDAVRemoteDir "paper3/ttbar_TEV_trip_pgsscan"
| wavewave/madgraph-auto-dataset | src/HEP/Automation/MadGraph/Dataset/Set20110714set2.hs | gpl-3.0 | 2,386 | 0 | 14 | 803 | 458 | 288 | 170 | 48 | 1 |
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
module Test.Fake
( FakeMoon(..)
, output, allowedContent, forkCount, timeoutCount
, loggedMessages, allActions, allowedActions, requireTheme
, Base(..)
, MoonTest(..)
, FMT
, unbase
, FakeMB
, evalTest, newEvalTest
, fake, fakeWith
, allowContent, allowAction, allowTerm, allowExec
-- Expectations
, computes
, isSameAs
, outputIs, outputMatches
, hasForked, hasSetupTimeout
-- reexports
, get
) where
import Control.Concurrent
import Control.Lens
import Control.Monad
import Control.Monad.Identity
import Control.Monad.State
import Data.List
import qualified Data.Map as M
import qualified Data.Vector as V
import System.Exit (ExitCode (..))
import qualified System.Timeout as T
import Test.Hspec
import Test.Hspec.Expectations
import Test.HUnit.Lang
import Moonbase.Core
import Moonbase.Theme
-- | Recorded effects and stubbed inputs of a fake moonbase run: everything
-- the code under test printed, logged, forked or was allowed to read/exec.
data FakeMoon m = FakeMoon
  { _output :: V.Vector String
  , _allowedContent :: M.Map FilePath String
  , _forkCount :: Int
  , _timeoutCount :: Int
  , _loggedMessages :: V.Vector Message
  , _allActions :: M.Map String Bool
  , _allowedActions :: M.Map String (Action (FakeMoon m) m)
  , _quitSignal :: Bool
  , _allowedTerms :: V.Vector String
  , _allowedExec :: M.Map String (ExitCode, String, String)
  , _requireTheme :: Bool }

-- | The pristine state a fake run starts from: nothing recorded, no stubs.
emptyFakeMoon :: FakeMoon m
emptyFakeMoon = FakeMoon
  { _output = V.empty
  , _allowedContent = M.empty
  , _forkCount = 0
  , _timeoutCount = 0
  , _loggedMessages = V.empty
  , _allActions = M.empty
  , _allowedActions = M.empty
  , _allowedTerms = V.empty
  , _allowedExec = M.empty
  , _quitSignal = False
  , _requireTheme = False }

-- Generate lenses (output, allowedContent, ...) for the fields above.
makeLenses ''FakeMoon
-- | The test monad: StateT carrying the fake world over IO.
newtype MoonTest a = MoonTest (StateT (FakeMoon MoonTest) IO a)
  deriving (Functor, Monad, MonadState (FakeMoon MoonTest))

type FMT = FakeMoon MoonTest
type FakeMB a = MB FMT MoonTest a

instance Applicative MoonTest where
  pure = return
  (<*>) = ap

instance MonadState FMT (MB FMT MoonTest) where
  get = moon get
  put = moon . put

-- | Run a test action against an explicit starting state.
evalTest :: FMT -> MoonTest a -> IO (a, FMT)
evalTest st (MoonTest f) = runStateT f st

-- | Run a test action starting from 'emptyFakeMoon'.
newEvalTest :: MoonTest a -> IO (a, FMT)
newEvalTest (MoonTest f) = runStateT f emptyFakeMoon

-- | Evaluate a moonbase action with the current fake state as its base.
runFakeMB :: FakeMB a -> MoonTest a
runFakeMB f = do
  st <- get
  eval (FakeBase st) f

-- | Run a moonbase action; return its result plus the final fake state.
fakeWith :: FakeMB a -> IO (a, FMT)
fakeWith = newEvalTest . runFakeMB

-- | Run a moonbase action, discarding the final fake state.
fake :: FakeMB a -> IO a
fake f = fst <$> fakeWith f

-- Stubbing helpers: register canned responses before running the action
-- under test.
allowContent :: FilePath -> String -> FakeMB ()
allowContent path content = allowedContent . at path ?= content

allowAction :: String -> Action FMT MoonTest -> FakeMB ()
allowAction name action = allowedActions . at name ?= action

allowTerm :: [String] -> FakeMB ()
allowTerm args = allowedTerms %= (|> unwords args)

allowExec :: String -> (ExitCode, String, String) -> FakeMB ()
allowExec cmd result = allowedExec . at cmd ?= result
-- Moonbase Implementation ----------------------------------------------------
-- | Effect interpretation for tests: record output/forks/timeouts in the
-- fake state and serve stubbed content/exec results.
instance Moon MoonTest where
  io = MoonTest . lift
  puts str = output %= (|> str)
  content = selectContent
  fork = forkTestMoon
  delay = io . threadDelay
  timeout = timeoutTestMoon
  exec cmd args = selectExec $ unwords (cmd:args)

-- | Moonbase interpretation for tests: log to the fake state, always use
-- the default theme (recording that it was requested), and record actions.
instance Moonbase FMT MoonTest where
  data Base FMT = FakeBase FMT
  log msg = loggedMessages %= (|> msg)
  theme = requireTheme .= True >> return defaultTheme
  withTheme _ = requireTheme .= True
  verbose = return False
  add n _ = allActions . at n ?= True
  terminal = selectTerminal
  withTerminal _ = return () -- FIXME
  quit = quitSignal .= True
-- | Unwrap the fake base back into the plain fake state.
unbase :: Base FMT -> FMT
unbase (FakeBase f) = f

-- | Return the stubbed exec result for a command line; fail the test (and
-- return a dummy 254 exit) when none was registered.
-- NOTE(review): "allowEexec" in the hint text below is a typo in the
-- message string ("allowExec"); left unchanged here.
selectExec :: String -> MoonTest (ExitCode, String, String)
selectExec cmd = do
  execs <- use allowedExec
  case execs ^? ix cmd of
    Just result -> return result
    Nothing -> io (assertFailure (unlines notAllowedExec)) >> return (ExitFailure 254, "", "")
  where
    notAllowedExec = [ "*** Unstubbed exec requested ***"
                     , "requested exec :" ++ show cmd
                     , "to stub use: allowEexec cmd [args]" ]

-- | Fail the test unless the terminal invocation was stubbed via 'allowTerm'.
selectTerminal :: [String] -> MB FMT MoonTest ()
selectTerminal args = do
  terms <- use allowedTerms
  unless (V.elem (unwords args) terms) (io $ assertFailure (unlines notAllowedTerm))
  where
    notAllowedTerm = [ "*** Unstubbed terminal requested ***"
                     , "requested term:" ++ show args
                     , "to stub this output use: allowTerm [args]" ]

-- | Return the stubbed file content for a path; fail the test (and return
-- the sentinel "FAILED") when none was registered via 'allowContent'.
selectContent :: FilePath -> MoonTest String
selectContent path = do
  ctnt <- use allowedContent
  case ctnt ^? ix path of
    Just c -> return c
    Nothing -> io (assertFailure (unlines notAllowedContent)) >> return "FAILED"
  where
    notAllowedContent = [ "*** Unstubbed content requested ***"
                        , "requested path:" ++ show path
                        , "to stub this output use: allowContent <path> <content>" ]

-- | Fork a real thread running against a snapshot of the current fake
-- state, counting the fork. State changes in the child are not merged back.
forkTestMoon :: MoonTest () -> MoonTest ThreadId
forkTestMoon f = do
  rt <- get
  forkCount += 1
  io $ forkIO (void $ evalTest rt f)

-- | Run with a real timeout against a snapshot of the current fake state,
-- counting the timeout setup.
timeoutTestMoon :: Int -> MoonTest a -> MoonTest (Maybe a)
timeoutTestMoon ms f = do
  rt <- get
  timeoutCount += 1
  io $ T.timeout ms (fst <$> evalTest rt f)
-- Expectations -----------------------------------------------------------
-- | Expect the action to evaluate to the given value.
computes :: (Show a, Eq a) => FakeMB a -> a -> Expectation
computes f value = fake f `shouldReturn` value

-- | Expect both actions to evaluate to the same value (each in a fresh
-- empty fake state).
isSameAs :: (Show a, Eq a) => FakeMB a -> FakeMB a -> Expectation
isSameAs f1 f2 = do
  v2 <- fake f2
  fake f1 `shouldReturn` v2

-- | Expect the recorded 'puts' output to equal the given lines exactly.
outputIs :: FakeMB a -> [String] -> Expectation
outputIs f t = do
  (_, rt) <- fakeWith f
  V.toList (rt ^. output) `shouldBe` t

-- | Expect at least one recorded output line to contain the given substring.
outputMatches :: FakeMB a -> String -> Expectation
outputMatches f t = do
  (_, rt) <- fakeWith f
  case V.find (isInfixOf t) (rt ^. output) of
    Just _ -> return ()
    Nothing -> assertFailure (notFound $ rt ^. output)
  where
    notFound output = unlines $ [ "Could not match `" ++ t ++ "`"
                                , "Output is:" ] ++ V.toList (formatOutput output)
    formatOutput = V.imap (\i l -> show i ++ ": " ++ l)

-- | Expect exactly the given number of 'fork' calls.
hasForked :: FakeMB a -> Int -> Expectation
hasForked f times = do
  (_, rt) <- fakeWith f
  rt ^. forkCount `shouldBe` times

-- | Expect exactly the given number of 'timeout' calls.
hasSetupTimeout :: FakeMB a -> Int -> Expectation
hasSetupTimeout f times = do
  (_, rt) <- fakeWith f
  rt ^. timeoutCount `shouldBe` times
| felixsch/moonbase | test/Test/Fake.hs | gpl-3.0 | 6,739 | 0 | 15 | 1,702 | 2,114 | 1,119 | 995 | -1 | -1 |
{-# LANGUAGE FlexibleInstances, FlexibleContexts, MultiParamTypeClasses, FunctionalDependencies, BangPatterns, NoMonomorphismRestriction, ScopedTypeVariables, StandaloneDeriving, TemplateHaskell, NamedFieldPuns, FlexibleContexts, TypeFamilies, OverlappingInstances, CPP #-}
{-# OPTIONS -Wall -fno-warn-unused-binds -fno-warn-name-shadowing -fno-warn-type-defaults #-} -- disabled warnings are because Printf-TH triggers them
-- | Utility functions for Patrick Bahr's /equivalence/ package
module Equivalence(
module Equivalence.Class,
-- * Equivalence classes
SetBasedEquivalenceClass, ec_elements, ec_map, ec_mapMonotonic,
ec_singleton, ec_union, ec_join, ec_elementList, ec_rep,
-- * Equivalence relations
Equivalence, eqv_classmap, eqv_classes, eqv_eq, mkEquivalence, mkEquivalence0, eqv_classcount,
eqv_class_elements, eqv_reps, eqv_generators, eqv_equivalents, eqv_rep,
eqv_classOf, eqv_classOf_safe
) where
import EitherC
import Data.Equivalence.Monad
import Data.Function
import Data.Hashable
import Data.List(foldl')
import Data.Monoid
import HomogenousTuples
import PrettyUtil
import Element
import Equivalence.Class
import Util
import Control.DeepSeq.TH
import OrphanInstances() -- NFData Set, NFData Map
import qualified Data.Set as S
import Data.Set(Set)
import qualified Data.Map as M
import Data.Map(Map)
-- | One equivalence class, stored as the set of its members plus a cached
-- representative.
data SetBasedEquivalenceClass a = SetBasedEquivalenceClass {
    -- | INVARIANT: This contains 'ec_rep'
    ec_elements :: Set a,
    -- | An arbitrarily chosen representative of the equivalence class
    ec_rep :: !a
}

-- | Members of the class as a list (ascending, from the underlying 'Set').
ec_elementList :: SetBasedEquivalenceClass a -> [a]
ec_elementList = S.toList . ec_elements

type instance Element (SetBasedEquivalenceClass a) = a

instance AsList (SetBasedEquivalenceClass a) where
    asList = ec_elementList

instance Ord a => IsEquivalenceClass (SetBasedEquivalenceClass a) where
    canonicalRep = ec_rep
    ecSize = S.size . ec_elements

-- | Map over all members and the representative.
ec_map :: (Ord a, Ord b) => (a -> b) -> SetBasedEquivalenceClass a -> SetBasedEquivalenceClass b
ec_map f SetBasedEquivalenceClass {ec_elements,ec_rep} =
    SetBasedEquivalenceClass {ec_elements= S.map f ec_elements, ec_rep=f ec_rep}

-- | Like 'ec_map' but requires a strictly monotonic function (uses the
-- cheaper 'S.mapMonotonic').
ec_mapMonotonic :: (Ord a, Ord b) => (a -> b) -> SetBasedEquivalenceClass a -> SetBasedEquivalenceClass b
ec_mapMonotonic f SetBasedEquivalenceClass {ec_elements,ec_rep} =
    SetBasedEquivalenceClass {ec_elements= S.mapMonotonic f ec_elements, ec_rep=f ec_rep}

deriving instance (Show a, Ord a) => Show (SetBasedEquivalenceClass a)

-- NOTE: equality/ordering compare representatives only; this is only
-- meaningful for classes drawn from the same equivalence relation.
instance Eq a => Eq (SetBasedEquivalenceClass a) where
    (==) = (==) `on` ec_rep

instance Ord a => Ord (SetBasedEquivalenceClass a) where
    compare = compare `on` ec_rep

-- | The class containing exactly one element (its own representative).
ec_singleton :: Ord a => a -> SetBasedEquivalenceClass a
ec_singleton a = SetBasedEquivalenceClass (S.singleton a) a

-- | Merge two classes; the representative of the first class is kept.
ec_union :: Ord a => SetBasedEquivalenceClass a -> SetBasedEquivalenceClass a -> SetBasedEquivalenceClass a
ec_union (SetBasedEquivalenceClass es1 r1) (SetBasedEquivalenceClass es2 _) = SetBasedEquivalenceClass (es1 `mappend` es2) r1
-- | An equivalence relation: a map from each element to its class, the
-- distinct classes, and the generating pairs it was built from.
data Equivalence a = Equivalence {
    eqv_classmap :: Map a (SetBasedEquivalenceClass a),
    -- | Distinct.
    eqv_classes :: [SetBasedEquivalenceClass a],
    eqv_generators :: [(a,a)]
}

type instance EquivalenceClassOf (Equivalence a) = SetBasedEquivalenceClass a
type instance Element (Equivalence a) = a

deriving instance (Show a, Ord a) => Show (Equivalence a)

instance Ord a => IsEquivalence (Equivalence a) where
    eqvClassOf_safe = eqv_classOf_safe

instance Ord a => EnumerableEquivalence (Equivalence a) where
    eqvClasses = eqv_classes

-- | Class of an element; errors if it is not in the relation's domain.
eqv_classOf :: Ord k => Equivalence k -> k -> SetBasedEquivalenceClass k
eqv_classOf = eqvClassOf

-- | Class of an element, failing in 'AttemptC' when it is not in the domain.
eqv_classOf_safe
  :: Ord k =>
     Equivalence k -> k -> AttemptC (SetBasedEquivalenceClass k)
eqv_classOf_safe e x =
    case M.lookup x (eqv_classmap e) of
        Just y -> return y
        Nothing -> toAttemptC $ ($failureStr "eqv_classOf_safe: Element not in the equivalence relation")

-- | Checks whether the given elements are equivalent. Throws an error if the first argument is not in the domain of the equivalence.
eqv_eq :: (Ord a) => Equivalence a -> a -> a -> Bool
eqv_eq = eqvEquivalent

-- | Number of distinct equivalence classes.
eqv_classcount :: Equivalence a -> Int
eqv_classcount = length . eqv_classes

-- | Gets the representative of the equivalence class of the given element. Throws an error if the element is not in the domain of the equivalence.
eqv_rep :: (Ord a) => Equivalence a -> a -> a
eqv_rep !e !x = ec_rep (eqv_classOf e x)
-- insertUnlessExists = Map.insertWith (curry snd)
mkEquivalence :: forall a. (Ord a) =>
    [(a,a)] -- ^ Gluings
    -> [a] -- ^ Additional elements that should be present in the returned relation (will only be equivalent to themselves
           -- if they don't occur in the first argument)
    -> Equivalence a
mkEquivalence pairs extraElements = mkEquivalence0 (pairs ++ [(a,a) | a <- extraElements])

-- | Like 'mkEquivalence', but without additional elements
-- Built by union-find (the /equivalence/ package's EquivM monad), then
-- frozen into a pure 'Equivalence'.
mkEquivalence0 :: forall a. (Ord a) => [(a,a)] -> Equivalence a
mkEquivalence0 pairs = (runEquivM ec_singleton ec_union go)
    -- this is semantically redundant but makes it possible to retrieve eqv_generators without
    -- computing the other stuff
    { eqv_generators = pairs }
    where
        go :: forall s. EquivM s (SetBasedEquivalenceClass a) a (Equivalence a)
        go = do
            mapM_ (uncurry equate) pairs
            -- Now capture the information contained in the state of the EquivM monad into our pure
            -- Equivalence structure
            let f :: EquivM s (SetBasedEquivalenceClass a) a (Equivalence a)
                  -> a
                  -> EquivM s (SetBasedEquivalenceClass a) a (Equivalence a)
                f mr a = do
                    -- Get the recursive result
                    r@Equivalence {
                        eqv_classmap = cm0,
                        eqv_classes = cs0} <- mr
                    -- Get the equivalence class of the current element
                    cls <- classDesc a
                    return $ if a `M.member` cm0
                        -- if the current element is already in the result Equivalence, do nothing
                        then r
                        else r {
                            eqv_classmap = M.insert a cls cm0
                          , eqv_classes =
                                -- Add the equivalence class to the eqv_classes list
                                -- only if the current element is the representative of the
                                -- equivalence class (to get a distinct list)
                                if a == ec_rep cls
                                    then cls : cs0
                                    else cs0
                          }
            foldl' f (return (Equivalence M.empty [] pairs)) allElems
        allElems = catPairs pairs
instance (Ord a, Pretty a) => Pretty (SetBasedEquivalenceClass a) where
    pretty = prettyClass
instance (Ord a, Pretty a) => Pretty (Equivalence a) where
    pretty = prettyEquivalence

-- | Flattens a class of classes: unions the element sets of all member
-- classes and takes the representative of the representative class.
-- NOTE(review): 'foldr1' is partial -- this assumes the outer class is
-- non-empty, which holds for classes produced by this module but is worth
-- confirming for external callers.
ec_join :: Ord a => SetBasedEquivalenceClass (SetBasedEquivalenceClass a) -> SetBasedEquivalenceClass a
ec_join ecs = SetBasedEquivalenceClass { ec_elements = (foldr1 S.union . fmap ec_elements . S.toList . ec_elements) ecs
                                       , ec_rep = (ec_rep . ec_rep) ecs
                                       }
-- | Maps all generators through @f@ and rebuilds the relation. Note that a
-- non-injective @f@ may merge previously distinct classes.
eqv_map :: (Ord a, Ord b) => (a -> b) -> Equivalence a -> Equivalence b
eqv_map f e = mkEquivalence0 (fmap (map2 f) (eqv_generators e))

-- Hash by representative only; cheap, and consistent for classes that share
-- a representative.
instance Hashable a => Hashable (SetBasedEquivalenceClass a) where hash = hash . ec_rep

-- | The set of all elements equivalent to the given element.
eqv_class_elements :: (Ord a) => Equivalence a -> a -> Set a
eqv_class_elements e = ec_elements . eqvClassOf e

-- | All elements equivalent to the given element, as a list.
eqv_equivalents :: (Ord a) => Equivalence a -> a -> [a]
eqv_equivalents e = asList . eqvClassOf e

-- | Returns a list containing a represenative of each class
eqv_reps :: Equivalence b -> [b]
eqv_reps e = ec_rep `fmap` eqv_classes e

-- Template Haskell: derive NFData instances for full evaluation support.
deriveNFData ''Equivalence
deriveNFData ''SetBasedEquivalenceClass
| DanielSchuessler/hstri | Equivalence.hs | gpl-3.0 | 8,364 | 0 | 20 | 2,235 | 1,891 | 1,014 | 877 | 127 | 3 |
{-# LANGUAGE OverloadedStrings #-}
-- |
-- Module : Network.Google.Internal.Body
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : provisional
-- Portability : non-portable (GHC extensions)
--
module Network.Google.Internal.Body where
import Control.Monad.IO.Class (MonadIO (..))
import Data.Conduit.Binary (sourceFile)
import Data.Maybe (fromMaybe)
import qualified Data.Text as Text
import Network.Google.Types (Body (..))
import Network.HTTP.Conduit (requestBodySource)
import Network.HTTP.Media (MediaType, parseAccept, (//))
import qualified Network.Mime as MIME
import System.IO
-- | Convenience function for obtaining the size of a file, in bytes.
-- The handle is opened in binary mode and closed again before returning.
getFileSize :: MonadIO m => FilePath -> m Integer
getFileSize path = liftIO $ withBinaryFile path ReadMode hFileSize
-- | Attempt to calculate the MIME type based on file extension.
--
-- Defaults to @application/octet-stream@ if no file extension is recognised.
getMIMEType :: FilePath -> MediaType
getMIMEType path = fromMaybe fallback (parseAccept raw)
  where
    -- Only the file name matters for the lookup, so strip any directories.
    fileName = Text.takeWhileEnd (/= '/') (Text.pack path)
    raw      = MIME.defaultMimeLookup fileName
    fallback = "application" // "octet-stream"
-- | Construct a 'Body' from a 'FilePath'.
--
-- This uses 'getMIMEType' to calculate the MIME type from the file extension,
-- you can use 'bodyContentType' to set a MIME type explicitly.
sourceBody :: MonadIO m => FilePath -> m Body
sourceBody f = do
    -- The size is read up front because 'requestBodySource' needs a known
    -- content length.
    n <- getFileSize f
    pure $ Body
        (getMIMEType f)
        (requestBodySource (fromIntegral n) (sourceFile f))
| rueshyna/gogol | gogol/src/Network/Google/Internal/Body.hs | mpl-2.0 | 1,715 | 0 | 12 | 392 | 296 | 174 | 122 | -1 | -1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.File.Projects.Locations.Instances.List
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Lists all instances in a project for either a specified location or for
-- all locations.
--
-- /See:/ <https://cloud.google.com/filestore/ Cloud Filestore API Reference> for @file.projects.locations.instances.list@.
module Network.Google.Resource.File.Projects.Locations.Instances.List
(
-- * REST Resource
ProjectsLocationsInstancesListResource
-- * Creating a Request
, projectsLocationsInstancesList
, ProjectsLocationsInstancesList
-- * Request Lenses
, plilParent
, plilXgafv
, plilUploadProtocol
, plilOrderBy
, plilAccessToken
, plilUploadType
, plilFilter
, plilPageToken
, plilPageSize
, plilCallback
) where
import Network.Google.File.Types
import Network.Google.Prelude
-- | A resource alias for @file.projects.locations.instances.list@ method which the
-- 'ProjectsLocationsInstancesList' request conforms to.
--
-- The chain below mirrors the REST route @GET v1/{parent}/instances@ plus its
-- supported query parameters; @alt@ is fixed to JSON by the request client.
type ProjectsLocationsInstancesListResource =
     "v1" :>
       Capture "parent" Text :>
         "instances" :>
           QueryParam "$.xgafv" Xgafv :>
             QueryParam "upload_protocol" Text :>
               QueryParam "orderBy" Text :>
                 QueryParam "access_token" Text :>
                   QueryParam "uploadType" Text :>
                     QueryParam "filter" Text :>
                       QueryParam "pageToken" Text :>
                         QueryParam "pageSize" (Textual Int32) :>
                           QueryParam "callback" Text :>
                             QueryParam "alt" AltJSON :>
                               Get '[JSON] ListInstancesResponse
-- | Lists all instances in a project for either a specified location or for
-- all locations.
--
-- /See:/ 'projectsLocationsInstancesList' smart constructor.
data ProjectsLocationsInstancesList =
  ProjectsLocationsInstancesList'
    { _plilParent :: !Text                       -- ^ required parent resource path
    , _plilXgafv :: !(Maybe Xgafv)               -- ^ error format selector
    , _plilUploadProtocol :: !(Maybe Text)       -- ^ e.g. \"raw\", \"multipart\"
    , _plilOrderBy :: !(Maybe Text)              -- ^ sort order
    , _plilAccessToken :: !(Maybe Text)          -- ^ OAuth access token
    , _plilUploadType :: !(Maybe Text)           -- ^ legacy upload protocol
    , _plilFilter :: !(Maybe Text)               -- ^ list filter
    , _plilPageToken :: !(Maybe Text)            -- ^ pagination cursor
    , _plilPageSize :: !(Maybe (Textual Int32))  -- ^ maximum items per page
    , _plilCallback :: !(Maybe Text)             -- ^ JSONP callback
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ProjectsLocationsInstancesList' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'plilParent'
--
-- * 'plilXgafv'
--
-- * 'plilUploadProtocol'
--
-- * 'plilOrderBy'
--
-- * 'plilAccessToken'
--
-- * 'plilUploadType'
--
-- * 'plilFilter'
--
-- * 'plilPageToken'
--
-- * 'plilPageSize'
--
-- * 'plilCallback'
projectsLocationsInstancesList
    :: Text -- ^ 'plilParent'
    -> ProjectsLocationsInstancesList
-- Only the parent path is mandatory; every optional query parameter starts
-- out unset.
projectsLocationsInstancesList pPlilParent_ =
  ProjectsLocationsInstancesList'
    { _plilParent = pPlilParent_
    , _plilXgafv = Nothing
    , _plilUploadProtocol = Nothing
    , _plilOrderBy = Nothing
    , _plilAccessToken = Nothing
    , _plilUploadType = Nothing
    , _plilFilter = Nothing
    , _plilPageToken = Nothing
    , _plilPageSize = Nothing
    , _plilCallback = Nothing
    }
-- | Required. The project and location for which to retrieve instance
-- information, in the format
-- projects\/{project_id}\/locations\/{location}. In Cloud Filestore,
-- locations map to GCP zones, for example **us-west1-b**. To retrieve
-- instance information for all locations, use \"-\" for the {location}
-- value.
plilParent :: Lens' ProjectsLocationsInstancesList Text
plilParent
  = lens _plilParent (\ s a -> s{_plilParent = a})

-- | V1 error format.
plilXgafv :: Lens' ProjectsLocationsInstancesList (Maybe Xgafv)
plilXgafv
  = lens _plilXgafv (\ s a -> s{_plilXgafv = a})

-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
plilUploadProtocol :: Lens' ProjectsLocationsInstancesList (Maybe Text)
plilUploadProtocol
  = lens _plilUploadProtocol
      (\ s a -> s{_plilUploadProtocol = a})

-- | Sort results. Supported values are \"name\", \"name desc\" or \"\"
-- (unsorted).
plilOrderBy :: Lens' ProjectsLocationsInstancesList (Maybe Text)
plilOrderBy
  = lens _plilOrderBy (\ s a -> s{_plilOrderBy = a})

-- | OAuth access token.
plilAccessToken :: Lens' ProjectsLocationsInstancesList (Maybe Text)
plilAccessToken
  = lens _plilAccessToken
      (\ s a -> s{_plilAccessToken = a})

-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
plilUploadType :: Lens' ProjectsLocationsInstancesList (Maybe Text)
plilUploadType
  = lens _plilUploadType
      (\ s a -> s{_plilUploadType = a})

-- | List filter.
plilFilter :: Lens' ProjectsLocationsInstancesList (Maybe Text)
plilFilter
  = lens _plilFilter (\ s a -> s{_plilFilter = a})

-- | The next_page_token value to use if there are additional results to
-- retrieve for this list request.
plilPageToken :: Lens' ProjectsLocationsInstancesList (Maybe Text)
plilPageToken
  = lens _plilPageToken
      (\ s a -> s{_plilPageToken = a})

-- | The maximum number of items to return.
-- The @mapping _Coerce@ hides the 'Textual' wrapper so callers work with a
-- plain 'Int32'.
plilPageSize :: Lens' ProjectsLocationsInstancesList (Maybe Int32)
plilPageSize
  = lens _plilPageSize (\ s a -> s{_plilPageSize = a})
      . mapping _Coerce

-- | JSONP
plilCallback :: Lens' ProjectsLocationsInstancesList (Maybe Text)
plilCallback
  = lens _plilCallback (\ s a -> s{_plilCallback = a})
-- | Wires the request record to the servant-style resource alias above:
-- declares the response type, the required OAuth scope, and a client that
-- passes every field as a path\/query argument (JSON is always requested).
instance GoogleRequest ProjectsLocationsInstancesList
         where
        type Rs ProjectsLocationsInstancesList =
             ListInstancesResponse
        type Scopes ProjectsLocationsInstancesList =
             '["https://www.googleapis.com/auth/cloud-platform"]
        requestClient ProjectsLocationsInstancesList'{..}
          = go _plilParent _plilXgafv _plilUploadProtocol
              _plilOrderBy
              _plilAccessToken
              _plilUploadType
              _plilFilter
              _plilPageToken
              _plilPageSize
              _plilCallback
              (Just AltJSON)
              fileService
          where go
                  = buildClient
                      (Proxy ::
                         Proxy ProjectsLocationsInstancesListResource)
                      mempty
| brendanhay/gogol | gogol-file/gen/Network/Google/Resource/File/Projects/Locations/Instances/List.hs | mpl-2.0 | 6,970 | 0 | 20 | 1,618 | 1,048 | 607 | 441 | 148 | 1 |
import Data.Ratio
import Data.List
fm = flip (-)
-- Total division: a divisor of 0 yields 0 instead of diverging.
cdiv a b
  | b == 0    = 0
  | otherwise = a / b
-- Flipped total division: fcdiv a b == b / a, with a == 0 mapped to 0.
fcdiv a b
  | a == 0    = 0
  | otherwise = b / a
-- All length-n sequences drawn (with repetition) from l, varying the last
-- position fastest -- same order as the original concat/map formulation.
genChoose _ 0 = [[]]
genChoose l n = [h : t | h <- l, t <- genChoose l (n - 1)]
-- The six operators tried between digits. fm/fcdiv cover the flipped
-- argument orders of (-) and (/); cdiv makes division by zero total (it
-- yields 0, which is later discarded by the positivity filter in genSeq).
ops :: [Ratio Integer -> Ratio Integer -> Ratio Integer]
ops = [fm, fcdiv, (-), (+), (*), (cdiv) ]
-- Every assignment of three operators to the three slots (6^3 combinations).
opCombos = genChoose ops 3
-- Evaluate the right-associated expression o1 a (o2 b (o3 c d)).
-- Partial by design: only ever called with 4 values and 3 operators.
doEval [a, b, c, d] [o1, o2, o3] = a `o1` (b `o2` (c `o3` d))
-- All distinct positive integers expressible from the digit multiset s,
-- over every digit permutation and operator assignment; sorted ascending.
genSeq s = sort . nub $
    [ numerator v
    | p <- permutations s
    , op <- opCombos
    , let v = doEval p op
    , denominator v == 1
    , v > 0
    ]

-- Length of the initial consecutive run 1,2,3,... obtainable from digits s.
matchSeq s = length . takeWhile (uncurry (==)) . zip [1 ..] $ genSeq (map (% 1) s)
-- All strictly increasing digit quadruples a < b < c < d drawn from 1..9.
allSets =
  [ [a,b,c,d] | d <- [1..9], c <- [1..d-1], b <- [1..c-1], a <- [1..b-1] ]
-- Project Euler 93: the digit set maximising the length of the consecutive
-- run 1..n of expressible integers (ties broken by the larger digit list).
prob93 = snd $ maximum $ zip (map matchSeq allSets) allSets
main = print prob93
| jdavidberger/project-euler | prob93.hs | lgpl-3.0 | 855 | 0 | 13 | 210 | 522 | 281 | 241 | 25 | 1 |
module VMerge where
import CLaSH.Prelude
-- | Interleave the two 2-element vectors; with inputs <1,2> and <3,4> the
-- expected output below is <1,3,2,4>.
topEntity :: (Vec 2 Int,Vec 2 Int) -> Vec 4 Int
topEntity (x,y) = merge x y

-- Constant stimulus: the vectors <1,2> and <3,4>, built with iterateI (+1).
testInput :: Signal (Vec 2 Int,Vec 2 Int)
testInput = signal (iterateI (+1) 1,iterateI (+1) 3)

-- Golden-output checker for the generated test bench.
expectedOutput :: Signal (Vec 4 Int) -> Signal Bool
expectedOutput = outputVerifier ((1:>3:>2:>4:>Nil):>Nil)
| ggreif/clash-compiler | tests/shouldwork/Vector/VMerge.hs | bsd-2-clause | 325 | 0 | 12 | 55 | 171 | 90 | 81 | -1 | -1 |
import Data.Array
import qualified Data.Array.Unboxed as U
import qualified Data.Foldable as Fld
import Data.List
import qualified Data.Map as Map
import Data.Maybe
import qualified Data.Sequence as Seq
-- Build an array over (a, b) with constructor la; f gets a lookup into the
-- array being built, so cells may lazily refer to other cells (knot-tying).
buildArr la a b f = res
  where
    res = la (a, b) (map (f (res !)) [a .. b])

-- The elements of an array at indices [i .. j], as a list.
arrlst i j xs = map (xs !) [i .. j]

-- Flatten the children forest cs starting at node n, prepending onto acc;
-- each node appears after all of its descendants (post-order).
ordr cs acc n = Fld.foldr (\x acc -> ordr cs acc x) (n : acc) (cs ! n)
intersects (x1, x2) (y1, y2) = (x1 <= y1 && y1 <= x2) || (x1 <= y2 && y2 <= x2) || (y1 <= x1 && x1 <= y2)
-- Search tree over sorted values; the Int payload in each constructor is a
-- cached rank/count consumed by btbound (see btcons for how it is assigned).
data BinTree = BtEmpty Int | BtLeaf Int Int | BtNode Int Int BinTree BinTree deriving (Show, Eq, Ord)
-- Range tree over x-coordinates; each node caches a BinTree of the y-values
-- of all points in its x-interval (a merge-sort tree).
data RangeTree = RtEmpty | RtLeaf Int BinTree | RtNode Int Int Int BinTree RangeTree RangeTree deriving (Show, Eq, Ord)
-- Build a range tree over the x-interval (l, r) from (x, y) points.
-- Returns the tree together with the length-annotated sorted y-list of all
-- points, so each parent can merge its children's lists bottom-up.
rtcons _ [] = (RtEmpty, (0, []))
rtcons _ [(x, y)] = (RtLeaf x (BtLeaf y 1), (1, [y]))
rtcons (l, r) xs = (RtNode m l r (fst $ btcons 0 bt) lrt rrt, bt)
  where
    m = (l + r) `div` 2
    (xl, xr) = part m xs
    (lrt, lbt) = rtcons (l, m) xl
    (rrt, rbt) = rtcons (succ m, r) xr
    bt = merge lbt rbt
-- Collect the cached BinTrees of the canonical range-tree nodes whose
-- x-intervals exactly tile the query range [l, r].
css rt l r = css' rt l r []
  where
    css' RtEmpty _ _ acc = acc
    css' (RtLeaf x t) l r acc = if l <= x && x <= r then t : acc else acc
    css' (RtNode x l' r' t lt rt) l r acc =
      -- Disjoint: contribute nothing; contained: take the whole node;
      -- otherwise recurse into both children.
      if r < l' || r' < l
        then acc
        else
          if l <= l' && r' <= r
            then t : acc
            else css' lt l r (css' rt l r acc)
-- Binary search over y in [0, yh] on the canonical trees covering x-range
-- [xl, xh]: find a y whose rank count equals k.
boundy rt xl xh yh k = bound (css rt xl xh) 0 yh k

-- The actual binary search; narrows [yl, yh] until the count at the midpoint
-- equals k. NOTE(review): assumes counts are monotone in y and that such a
-- y exists within the interval.
bound ts yl yh k =
  if yl == yh
    then yh
    else
      if kym > k
        then bound ts yl (pred ym) k
        else
          if kym < k
            then bound ts (succ ym) yh k
            else bound ts yl ym k
  where
    ym = (yl + yh) `div` 2
    kym = btsbound ts ym

-- Total number of elements <= x across all collected BinTrees.
btsbound ts x = sum $ map (\t -> btbound t x) ts
-- Build a balanced search tree from the first n elements of a sorted list;
-- p counts the elements preceding this subtree, so each node's Int payload
-- is its 1-based rank in the full list. Returns the unconsumed suffix.
btcons p (0, xs) = (BtEmpty p, xs)
btcons p (1, (x : xs)) = (BtLeaf x (p + 1), xs)
btcons p (n, xs) = (BtNode x (p + m + 1) l r, xs'')
  where
    m = n `div` 2
    (l, (x : xs')) = btcons p (m, xs)
    (r, xs'') = btcons (p + m + 1) (n - m - 1, xs')

-- Rank query: the number of elements <= x, read off the cached ranks.
btbound (BtEmpty k) _ = k
btbound (BtLeaf x' k) x =
  if x' <= x
    then k
    else 0
btbound (BtNode x' k l r) x =
  if x == x'
    then k
    else
      if x < x'
        then btbound l x
        else max k $ btbound r x
-- Merge two length-annotated sorted lists; on ties the element from the
-- second list is emitted first (x < y is a strict comparison).
merge (nl, xs0) (nr, ys0) = (nl + nr, go xs0 ys0)
  where
    go [] ys = ys
    go xs [] = xs
    go xs@(x : xs') ys@(y : ys')
      | x < y     = x : go xs' ys
      | otherwise = y : go xs ys'
-- Split points into (fst <= m, fst > m). Elements are consed onto the
-- accumulators left to right, so each half comes out in reverse input order
-- (identical to the original explicit recursion).
part m = foldl place ([], [])
  where
    place (ls, rs) e@(x, _) =
      if x <= m
        then (e : ls, rs)
        else (ls, e : rs)
-- Reads a rooted tree (edges child->parent), per-node salaries, and then
-- answers q online queries via the range tree built over the tree's
-- post-order numbering.
main = do
  pstr <- getLine
  let (n : q : _) = map read (words pstr)
  es <- mapM (const readEdge) [1 .. pred n]
  -- Children adjacency: parent -> sequence of children.
  let mchildren = Map.fromListWith (Seq.><) (map (\(a, b) -> (b, Seq.singleton a)) es)
  let children = buildArr listArray 1 n (\_ x -> Fld.toList $ if x `Map.member` mchildren then fromJust $ x `Map.lookup` mchildren else Seq.empty)
  let ranks = buildArr U.listArray 1 n (\mem x -> Fld.foldl' (+) 0 ((\x -> succ $ mem x) `map` (children ! x)))
  sstr <- getLine
  let sals = (map read $ words sstr) :: [Int]
  -- Post-order node ordering rooted at 1, plus both direction maps.
  let ordering = ordr children [] 1
  let toord = (U.array (1, n) $ zip ordering [1 ..]) :: Array Int Int
  let fromord = (U.listArray (1, n) ordering) :: Array Int Int
  -- ints ! x: start of the contiguous post-order interval of x's subtree.
  let ints = buildArr U.listArray 1 n (\mem x -> if null $ children ! (fromord ! x) then x else mem $ toord ! (head (children ! (fromord ! x))))
  -- Salary ranks: tosrt maps node -> salary rank, fromsrt is the inverse.
  let sorted = map snd (sort $ zip sals [1 .. n])
  let tosrt = (U.array (1, n) $ zip sorted [1 ..]) :: Array Int Int
  let fromsrt = (U.listArray (1, n) sorted) :: Array Int Int
  let ordsrt = (U.listArray (1, n) (((tosrt !) . (fromord !)) `map` [1 .. n])) :: Array Int Int
  let (rt, _) = rtcons (1, n) (map (\x -> (x, tosrt ! (fromord ! x))) [1 .. n])
  tst q 0 (toord, fromord, tosrt, fromsrt, ordsrt, ints, rt, n)
-- Query loop: each query (v, k) is decoded relative to the previous answer d
-- (v + d is the real node), then answered as the k-th smallest salary rank
-- within v's subtree interval [lo, ro] in post-order.
tst 0 _ _ = return ()
tst q d dt@(toord, fromord, tosrt, fromsrt, ordsrt, ints, t, n) = do
  qstr <- getLine
  let (v : k : _) = map read (words qstr)
  let vo = toord ! (v + d)
  let lo = ints ! vo
  -- pred vo excludes the node itself (it sits at the end of its interval).
  let ro = pred vo
  let dord = boundy t lo ro n k
  let d' = fromsrt ! dord
  putStrLn $ show d'
  tst (pred q) d' dt
-- | Read one \"a b\" edge line from stdin; extra tokens are ignored.
readEdge :: IO (Int, Int)
readEdge = do
  estr <- getLine
  let (a : b : _) = map read (words estr)
  return (a, b)
| pbl64k/HackerRank-Contests | 2014-06-20-FP/BoleynSalary/bs.accepted.hs | bsd-2-clause | 4,701 | 0 | 20 | 1,696 | 2,475 | 1,319 | 1,156 | 108 | 6 |
module HEP.ModelScan.MSSMScan.Parse where
import Debug.Trace
import qualified Data.ByteString.Lazy.Char8 as B
import Data.ByteString.Lex.Lazy.Double
import HEP.ModelScan.MSSMScan.Model
import HEP.Physics.MSSM.OutputPhys
-- Local reimplementations of Either/Maybe helpers (kept local to avoid
-- depending on newer standard-library exports). Fixes relative to the
-- previous version: explicit type signatures, wildcard patterns for unused
-- bindings, and descriptive 'error' calls instead of bare 'undefined' so a
-- failed extraction says which helper blew up.

-- | True iff the value is a 'Right'.
isRight :: Either a b -> Bool
isRight (Left _)  = False
isRight (Right _) = True

-- | Extract the 'Right' payload; errors with a descriptive message on 'Left'.
unRight :: Either a b -> b
unRight (Right x) = x
unRight (Left _)  = error "unRight: Left"

-- | True iff the value is a 'Just'.
isJust :: Maybe a -> Bool
isJust Nothing = False
isJust _       = True

-- | Extract the 'Just' payload; errors with a descriptive message on 'Nothing'.
unJust :: Maybe a -> a
unJust (Just x) = x
unJust Nothing  = error "unJust: Nothing"
-- | Join two id-indexed result streams on equal ids; both streams must be
-- in ascending id order. Pair components are forced to WHNF as they are
-- emitted to avoid thunk build-up over large scans.
-- NOTE(review): when idx > idy the merge stops with [] instead of advancing
-- the second stream -- harmless as long as output ids are a subset of input
-- ids (as produced here), but worth confirming if either producer changes.
mergeresult :: [(Int,a)] -> [(Int,b)]
            -> [(Int,(a,b))]
mergeresult [] _ = []
mergeresult _ [] = []
mergeresult (x@(idx,restx):xs) (y@(idy,resty):ys) =
    if idx == idy
      then restx `seq` resty `seq` (idx,(restx,resty)) : mergeresult xs ys
      else if idx < idy
             then mergeresult xs (y:ys)
             else []
-- | Parse one space-separated output line into its record id followed by the
-- 43 physical quantities of an 'OutputPhys'. Yields 'Nothing' as soon as any
-- field fails to parse. (a44 is bound but deliberately unused.)
parseOutput :: B.ByteString -> Maybe (Int,OutputPhys)
parseOutput ostr = do
    let chunks = B.split ' ' ostr
        (a1:a2:a3:a4:a5:a6:a7:a8:a9:a10
          :a11:a12:a13:a14:a15:a16:a17:a18:a19:a20
          :a21:a22:a23:a24:a25:a26:a27:a28:a29:a30
          :a31:a32:a33:a34:a35:a36:a37:a38:a39:a40
          :a41:a42:a43:a44) = filter (not. B.null) chunks
        myint x = do y <- B.readInt x
                     return (fst y)
        -- readDouble stops at the first unparsable char; if the field was
        -- not consumed completely, retry on the repaired string.
        mydouble x = do y <- readDouble x
                        if (not. B.null .snd) y
                          then do y' <- readDouble (tempsol x)
                                  return (fst y')
                          else return (fst y)
        -- Workaround for numbers like "5.e-2": splice a "0" fractional
        -- digit right after the decimal point so readDouble accepts them.
        tempsol str = let (fore,rear) = B.break (=='.') str
                          str' = fore `B.append` B.pack ".0" `B.append` B.tail rear
                      in str'
    id' <- myint a1
    -- Progress trace every 10000 records (shadows Prelude.id on purpose).
    let id = if (id' `mod` 10000 == 0)
               then trace ("id = " ++ show id') id'
               else id'
    data_Mh <- mydouble a2
    data_MHH <- mydouble a3
    data_MH3 <- mydouble a4
    data_MHc <- mydouble a5
    data_MNE1 <- mydouble a6
    data_MNE2 <- mydouble a7
    data_MNE3 <- mydouble a8
    data_MNE4 <- mydouble a9
    data_MC1 <- mydouble a10
    data_MC2 <- mydouble a11
    data_MSG <- mydouble a12
    data_MSuL <- mydouble a13
    data_MSdL <- mydouble a14
    data_MSeL <- mydouble a15
    data_MSne <- mydouble a16
    data_MSuR <- mydouble a17
    data_MSdR <- mydouble a18
    data_MSeR <- mydouble a19
    data_MScL <- mydouble a20
    data_MSsL <- mydouble a21
    data_MSmL <- mydouble a22
    data_MSnm <- mydouble a23
    data_MScR <- mydouble a24
    data_MSsR <- mydouble a25
    data_MSmR <- mydouble a26
    data_MSt1 <- mydouble a27
    data_MSb1 <- mydouble a28
    data_MSl1 <- mydouble a29
    data_MSn1 <- mydouble a30
    data_MSt2 <- mydouble a31
    data_MSb2 <- mydouble a32
    data_MSl2 <- mydouble a33
    data_deltarho <- mydouble a34
    data_gmuon <- mydouble a35
    data_bsgnlo <- mydouble a36
    data_bsmumu <- mydouble a37
    data_bino <- mydouble a38
    data_wino <- mydouble a39
    data_higgsino1 <- mydouble a40
    data_higgsino2 <- mydouble a41
    data_micro_Xf <- mydouble a42
    data_micro_Omega <- mydouble a43
    -- Field order here must match the OutputPhys constructor exactly.
    let output = OutputPhys data_Mh data_MHH data_MH3 data_MHc
                            data_MNE1 data_MNE2 data_MNE3 data_MNE4
                            data_MC1 data_MC2 data_MSG data_MSuL
                            data_MSdL data_MSeL data_MSne data_MSuR
                            data_MSdR data_MSeR data_MScL data_MSsL
                            data_MSmL data_MSnm data_MScR data_MSsR
                            data_MSmR data_MSt1 data_MSb1 data_MSl1
                            data_MSn1 data_MSt2 data_MSb2 data_MSl2
                            data_deltarho data_gmuon data_bsgnlo data_bsmumu
                            data_bino data_wino data_higgsino1 data_higgsino2
                            data_micro_Xf data_micro_Omega
    return (id, output)
-- | Parse matching input/output dump files into merged per-model records.
-- Line i of str1 is input record i (ids assigned 1-based by position); str2
-- lines carry their own ids and may be a subset, so the two streams are
-- joined by id with 'mergeresult'. Unparsable output lines are dropped.
newparsestr :: (Model a) => a -> B.ByteString -> B.ByteString
            -> [(Int,(ModelInput a,OutputPhys))]
newparsestr mdl str1 str2 =
    let strlines1 = B.lines str1
        inputresult = {-# SCC "inputresult" #-} zip [1..] $ map parseInput strlines1
        strlines2 = B.lines str2
        outputresult' = map (parseOutput) strlines2
        outputresult'' = filter isJust outputresult'
        outputresult = map unJust outputresult''
        combinedresult = mergeresult inputresult outputresult
    in combinedresult
| wavewave/MSSMScan | src/HEP/ModelScan/MSSMScan/Parse.hs | bsd-2-clause | 5,897 | 1 | 53 | 2,937 | 1,479 | 723 | 756 | 110 | 3 |
{-|
Module : Data.Number.MPFR.Integer
Description : Integer related functions
Copyright : (c) Aleš Bizjak
License : BSD3
Maintainer : ales.bizjak0@gmail.com
Stability : experimental
Portability : non-portable
For documentation on particular functions see
<http://www.mpfr.org/mpfr-chttp://www.mpfr.org/mpfr-current/mpfr.html#Integer-Related-Functions>
-}
{-# INCLUDE <mpfr.h> #-}
{-# INCLUDE <chsmpfr.h> #-}
module Data.Number.MPFR.Integer where
import Data.Number.MPFR.Internal
-- The functions in this group return only the rounded value; each delegates
-- to its '_'-suffixed sibling below and discards the accompanying 'Int'
-- return code of the underlying MPFR call.

-- | Round to an integer in the given rounding mode (wraps @mpfr_rint@).
rint :: RoundMode -> Precision -> MPFR -> MPFR
rint r p = fst . rint_ r p

-- | Round towards plus infinity (wraps @mpfr_ceil@).
ceil :: Precision -> MPFR -> MPFR
ceil p = fst . ceil_ p

-- | Round towards minus infinity (wraps @mpfr_floor@).
floor :: Precision -> MPFR -> MPFR
floor p = fst . floor_ p

-- | Round to the nearest integer (wraps @mpfr_round@).
round :: Precision -> MPFR -> MPFR
round p = fst . round_ p

-- | Round towards zero (wraps @mpfr_trunc@).
trunc :: Precision -> MPFR -> MPFR
trunc p = fst . trunc_ p

-- | Like 'rint' but via @mpfr_rint_ceil@.
rintCeil :: RoundMode -> Precision -> MPFR -> MPFR
rintCeil r p = fst . rintCeil_ r p

-- | Like 'rint' but via @mpfr_rint_floor@.
rintFloor :: RoundMode -> Precision -> MPFR -> MPFR
rintFloor r p = fst . rintFloor_ r p

-- | Like 'rint' but via @mpfr_rint_round@.
rintRound :: RoundMode -> Precision -> MPFR -> MPFR
rintRound r p = fst . rintRound_ r p

-- | Like 'rint' but via @mpfr_rint_trunc@.
rintTrunc :: RoundMode -> Precision -> MPFR -> MPFR
rintTrunc r p = fst . rintTrunc_ r p
-- | Split a number into its integral and fractional parts (see 'modf_';
-- the third component of its result, the return code, is dropped here).
modf :: RoundMode
     -> Precision -- ^ precision to integral part
     -> Precision -- ^ precision to fractional part
     -> MPFR
     -> (MPFR, MPFR) -- ^ return (integral part, fractional part)
modf r p p' d = case modf_ r p p' d of
                  (a, b, _) -> (a, b)

-- | Fractional part (wraps @mpfr_frac@).
frac :: RoundMode -> Precision -> MPFR -> MPFR
frac r p = fst . frac_ r p

-- | Remainder of division (wraps @mpfr_fmod@).
fmod :: RoundMode -> Precision -> MPFR -> MPFR -> MPFR
fmod r p d = fst . fmod_ r p d

-- | Remainder of division (wraps @mpfr_remainder@).
remainder :: RoundMode -> Precision -> MPFR -> MPFR -> MPFR
remainder r p d = fst . remainder_ r p d

-- | Remainder together with the low quotient bits (wraps @mpfr_remquo@;
-- the trailing return code of 'remquo_' is dropped).
remquo :: RoundMode -> Precision -> MPFR -> MPFR -> (MPFR, Int)
remquo r p d d' = case remquo_ r p d d' of
                    (a, b, _) -> (a, b)
-- '_'-suffixed variants also return the 'Int' result of the underlying C
-- call (MPFR's ternary rounding indicator; see the MPFR manual linked in
-- the module header).

rint_ :: RoundMode -> Precision -> MPFR -> (MPFR, Int)
rint_ r p d = withMPFR r p d mpfr_rint

ceil_ :: Precision -> MPFR -> (MPFR, Int)
ceil_ p d = withMPFRR p d mpfr_ceil

floor_ :: Precision -> MPFR -> (MPFR, Int)
floor_ p d = withMPFRR p d mpfr_floor

round_ :: Precision -> MPFR -> (MPFR, Int)
round_ p d = withMPFRR p d mpfr_round

trunc_ :: Precision -> MPFR -> (MPFR, Int)
trunc_ p d = withMPFRR p d mpfr_trunc

rintCeil_ :: RoundMode -> Precision -> MPFR -> (MPFR, Int)
rintCeil_ r p d = withMPFR r p d mpfr_rint_ceil

rintFloor_ :: RoundMode -> Precision -> MPFR -> (MPFR, Int)
rintFloor_ r p d = withMPFR r p d mpfr_rint_floor

rintRound_ :: RoundMode -> Precision -> MPFR -> (MPFR, Int)
rintRound_ r p d = withMPFR r p d mpfr_rint_round

rintTrunc_ :: RoundMode -> Precision -> MPFR -> (MPFR, Int)
rintTrunc_ r p d = withMPFR r p d mpfr_rint_trunc
-- | Split into integral and fractional parts via @mpfr_modf@; also returns
-- the C call's return code. Two result mpfr_t buffers are allocated, one at
-- each requested precision, and read back after the call.
modf_ :: RoundMode
      -> Precision -- ^ precision to compute integral part
      -> Precision -- ^ precision to compute fractional part
      -> MPFR
      -> (MPFR, MPFR, Int)
modf_ r p p' d = unsafePerformIO go
  where go = do ls <- mpfr_custom_get_size (fromIntegral p)
                fp <- mallocForeignPtrBytes (fromIntegral ls)
                ls' <- mpfr_custom_get_size (fromIntegral p')
                fp' <- mallocForeignPtrBytes (fromIntegral ls')
                alloca $ \p1 -> do
                  pokeDummy p1 fp (fromIntegral ls)
                  alloca $ \p2 -> do
                    pokeDummy p2 fp' (fromIntegral ls')
                    with d $ \p3 -> do
                      r3 <- mpfr_modf p1 p2 p3 ((fromIntegral . fromEnum) r)
                      r1 <- peekP p1 fp
                      r2 <- peekP p2 fp'
                      return (r1, r2, fromIntegral r3)
-- | Fractional part plus return code (wraps @mpfr_frac@).
frac_ :: RoundMode -> Precision -> MPFR -> (MPFR, Int)
frac_ r p d = withMPFR r p d mpfr_frac

-- | Remainder plus return code (wraps @mpfr_fmod@).
fmod_ :: RoundMode -> Precision -> MPFR -> MPFR -> (MPFR,Int)
fmod_ r p d d' = withMPFRsBA r p d d' mpfr_fmod

-- | Remainder plus return code (wraps @mpfr_remainder@).
remainder_ :: RoundMode -> Precision -> MPFR -> MPFR -> (MPFR,Int)
remainder_ r p d d' = withMPFRsBA r p d d' mpfr_remainder
-- | Wraps @mpfr_remquo@: returns the remainder, the quotient value written
-- by the C call through its out-parameter, and the call's return code.
remquo_ :: RoundMode -> Precision -> MPFR -> MPFR -> (MPFR, Int, Int)
remquo_ r p d d' = unsafePerformIO go
  where go = do ls <- mpfr_custom_get_size (fromIntegral p)
                fp <- mallocForeignPtrBytes (fromIntegral ls)
                alloca $ \p1 -> do
                  pokeDummy p1 fp p
                  with d $ \p2 ->
                    with d' $ \p3 ->
                      alloca $ \p4 -> do
                        r3 <- mpfr_remquo p1 p4 p2 p3 ((fromIntegral . fromEnum) r)
                        r1 <- peekP p1 fp
                        r2 <- peek p4
                        return (r1, fromIntegral r2, fromIntegral r3)
-- | True iff the number is an exact integer (wraps @mpfr_integer_p@, which
-- returns non-zero in that case).
isInteger :: MPFR -> Bool
isInteger d = withMPFRB d mpfr_integer_p /= 0
| ekmett/hmpfr | src/Data/Number/MPFR/Integer.hs | bsd-3-clause | 4,960 | 0 | 26 | 1,630 | 1,605 | 818 | 787 | 94 | 1 |
{-# LANGUAGE CPP, FlexibleContexts#-}
module Tools.TimePlot.Conf (
ConcreteConf(..),
Conf,
readConf
) where
import Text.Regex.TDFA
import Text.Regex.TDFA.ByteString
import Data.Time hiding (parseTime)
import Data.Time.Parse
import Data.List
import Graphics.Rendering.Chart
import qualified Data.ByteString.Char8 as S
import Data.ByteString.Lex.Fractional
import Unsafe.Coerce
import Tools.TimePlot.Types
-- | Fully resolved command-line configuration; @t@ is the timestamp type
-- ('LocalTime' via the 'Conf' alias below).
data ConcreteConf t =
  ConcreteConf {
    inFile :: !FilePath,
    -- Parses a timestamp prefix off an input line, returning the rest.
    parseTime :: !(S.ByteString -> Maybe (t, S.ByteString)),
    -- Input track -> (chart kind, suffix to append to track name for N:1 out:in mapping)
    chartKindF :: !(S.ByteString -> [(ChartKind t, S.ByteString)]),
    -- Optional clipping window: keep events with fromTime <= t < toTime
    -- (the CLI options are "-fromTime" inclusive / "-toTime" exclusive).
    fromTime :: !(Maybe t),
    toTime :: !(Maybe t),
    -- Rewrites axis labels (used to show deltas against a base time).
    transformLabel :: !(t -> String -> String),
    outFile :: !FilePath,
    outFormat :: !OutFormat,
    outResolution :: !(Int,Int)
  }

type Conf = ConcreteConf LocalTime

-- | Whether a kind rule replaces ("-k", Cut) or adds to ("+k", Accumulate)
-- the chart kinds chosen for a matching track.
data KindChoiceOperator = Cut | Accumulate
-- | Parse the raw command-line words into a 'Conf'. The "-tf" time format
-- is resolved first because the parser for every other time-valued option
-- ("-fromTime", "-toTime", "-baseTime") depends on it.
readConf :: [String] -> Conf
readConf args = readConf' parseTime
  where
    -- A leading word "date" in the "-tf" argument is stripped; the rest is
    -- the strptime pattern (or the special word "elapsed").
    pattern = case (words $ single "time format" "-tf" ("%Y-%m-%d %H:%M:%OS")) of
        "date":f -> S.pack (unwords f)
        f -> S.pack (unwords f)
    Just (ourBaseTime,_) = strptime "%Y-%m-%d %H:%M:%OS" "1900-01-01 00:00:00"
    {-# NOINLINE ourStrptime #-}
    ourStrptime :: S.ByteString -> Maybe (LocalTime, S.ByteString)
    -- "elapsed" mode: timestamps are plain second offsets added to a fixed
    -- epoch; otherwise delegate to strptime with the configured pattern.
    ourStrptime = if pattern == S.pack "elapsed"
                  then \s -> do
                      (d, s') <- readSigned readDecimal s
                      return (fromSeconds d ourBaseTime `add` ourBaseTime, s')
                  else strptime pattern
    parseTime s = ourStrptime s
    int2double = fromIntegral :: Int -> Double
    -- Fetch a single-valued option, falling back to def; multiple
    -- occurrences are a usage error.
    single desc name def = case (getArg name 1 args) of
        [[r]] -> r
        [] -> def
        _ -> error $ "Single argument expected for: "++desc++" ("++name++")"

    readConf' :: (S.ByteString -> Maybe (LocalTime, S.ByteString)) -> ConcreteConf LocalTime
    readConf' parseTime = ConcreteConf {inFile=inFile, outFile=outFile, outFormat=outFormat, outResolution=outRes,
                                        chartKindF=chartKindF, parseTime=parseTime, fromTime=fromTime, toTime=toTime,
                                        transformLabel=transformLabel}
      where
        inFile = single "input file" "-if" (error "No input file (-if) specified")
        outFile = single "output file" "-o" (error "No output file (-o) specified")
        -- Output format defaults to the output file's extension.
        outFormat = maybe OutPNG id $ lookup (single "output format" "-of" (name2format outFile)) $
                    [("png",OutPNG), ("pdf",OutPDF), ("ps",OutPS), ("svg",OutSVG)]
          where
            name2format = reverse . takeWhile (/='.') . reverse
        outRes = parseRes $ single "output resolution" "-or" "640x480"
          where
            parseRes s = case break (=='x') s of (h,_:v) -> (read h,read v)
        forceList :: [a] -> ()
        forceList = foldr seq ()
        -- Force every kind specification up front so malformed "-k"/"+k"
        -- arguments fail immediately rather than mid-plot.
        chartKindF = forceList [forceList plusKinds, forceList minusKinds, forceList defaultKindsPlus, defaultKindMinus `seq` ()] `seq` kindByRegex $
            [(Cut, matches regex, parseKind0 (words kind)) | [regex,kind] <- getArg "-k" 2 args] ++
            [(Accumulate, matches regex, parseKind0 (words kind)) | [regex,kind] <- getArg "+k" 2 args]
          where
            plusKinds = [parseKind0 (words kind) | [regex, kind] <- getArg "+k" 2 args]
            minusKinds = [parseKind0 (words kind) | [regex, kind] <- getArg "-k" 2 args]
            -- All matching "+k" kinds accumulate, then at most the first
            -- matching "-k" kind; with no match at all, the "-dk" default.
            kindByRegex rks s = if null specifiedKinds then [defaultKindMinus] else specifiedKinds
              where
                specifiedKinds = defaultKindsPlus ++
                    [k | (Accumulate, p, k) <- rks, p s] ++
                    case [k | (Cut, p, k) <- rks, p s] of {k:_ -> [k]; _ -> []}
            matches regex = matchTest (makeRegexOpts defaultCompOpt (ExecOption {captureGroups = False}) regex)
        fromTime = fst `fmap` (parseTime . S.pack $ single "minimum time (inclusive)" "-fromTime" "")
        toTime = fst `fmap` (parseTime . S.pack $ single "maximum time (exclusive)" "-toTime" "")
        baseTime = if pattern == S.pack "elapsed"
                   then Just ourBaseTime
                   else (fst `fmap` (parseTime . S.pack $ single "base time" "-baseTime" ""))
        -- With a base time known, axis labels become deltas against it.
        transformLabel t s = case baseTime of
            Nothing -> s
            Just bt -> showDelta t bt
        -- A kind word list starting with "+suffix" maps the track name to
        -- "track.suffix" (the N:1 out:in mapping).
        parseKind0 (('+':suffix):k) = (parseKind k, S.pack "." `S.append` S.pack suffix)
        parseKind0 k = (parseKind k, S.empty)
        -- One clause per chart kind; each misuse clause gives a usage hint.
        parseKind :: [String] -> ChartKind LocalTime
        parseKind ["acount", n ] = KindACount {binSize=read n}
        parseKind ("acount":_) = error "acount requires a single numeric argument, bin size, e.g.: -dk 'acount 1'"
        parseKind ["count", n ] = KindCount {binSize=read n}
        parseKind ("count":_) = error "count requires a single numeric argument, bin size, e.g.: -dk 'count 1'"
        parseKind ["apercent",n,b] = KindAPercent {binSize=read n,baseCount=read b}
        parseKind ("apercent":_) = error "apercent requires two numeric arguments: bin size and base value, e.g.: -dk 'apercent 1 480'"
        parseKind ["afreq", n ] = KindAFreq {binSize=read n}
        parseKind ("afreq":_) = error "afreq requires a single numeric argument, bin size, e.g.: -dk 'afreq 1'"
        parseKind ["freq", n ] = KindFreq {binSize=read n,style=BarsStacked}
        parseKind ["freq", n,s] = KindFreq {binSize=read n,style=parseStyle s}
        parseKind ("freq":_) = error $ "freq requires a single numeric argument, bin size, e.g.: -dk 'freq 1', " ++
                                       "or two arguments, e.g.: -dk 'freq 1 clustered'"
        parseKind ["hist", n ] = KindHistogram {binSize=read n,style=BarsStacked}
        parseKind ["hist", n,s] = KindHistogram {binSize=read n,style=parseStyle s}
        parseKind ("hist":_) = error $ "hist requires a single numeric argument, bin size, e.g.: -dk 'hist 1', " ++
                                       "or two arguments, e.g.: -dk 'hist 1 clustered'"
        parseKind ["event" ] = KindEvent
        parseKind ("event":_) = error "event requires no arguments"
        parseKind ["quantile",b,q] = KindQuantile {binSize=read b, quantiles=read ("["++q++"]")}
        parseKind ("quantile":_) = error $ "quantile requres two arguments: bin size and comma-separated " ++
                                           "(without spaces!) quantiles, e.g.: -dk 'quantile 1 0.5,0.75,0.9'"
        parseKind ["binf", b,q] = KindBinFreq {binSize=read b, delims =read ("["++q++"]")}
        parseKind ("binf":_) = error $ "binf requres two arguments: bin size and comma-separated " ++
                                       "(without spaces!) threshold values, e.g.: -dk 'binf 1 10,50,100,200,500'"
        parseKind ["binh", b,q] = KindBinHist {binSize=read b, delims =read ("["++q++"]")}
        parseKind ("binh":_) = error $ "binh requres two arguments: bin size and comma-separated " ++
                                       "(without spaces!) threshold values, e.g.: -dk 'binh 1 10,50,100,200,500'"
        parseKind ["lines" ] = KindLines
        parseKind ("lines":_) = error "lines requires no arguments"
        parseKind ["dots" ] = KindDots { alpha = 1 }
        parseKind ["dots", a ] = KindDots { alpha = read a }
        parseKind ("dots":_) = error "dots requires 0 or 1 arguments (the argument is alpha value: 0 = transparent, 1 = opaque, default 1)"
        parseKind ["cumsum", b ] = KindCumSum {binSize=read b, subtrackStyle=SumStacked}
        parseKind ["cumsum", b,s] = KindCumSum {binSize=read b, subtrackStyle=parseSubtrackStyle s}
        parseKind ("cumsum":_) = error $ "cumsum requires 1 or 2 arguments (bin size and subtrack style), e.g.: " ++
                                         "-dk 'cumsum 10' or -dk 'cumsum 10 stacked'"
        parseKind ["sum", b ] = KindSum {binSize=read b, subtrackStyle=SumStacked}
        parseKind ["sum", b,s] = KindSum {binSize=read b, subtrackStyle=parseSubtrackStyle s}
        parseKind ("sum":_) = error $ "sum requires one or two arguments: bin size and optionally " ++
                                      "subtrack style, e.g.: -dk 'sum 1' or -dk 'sum 1 stacked'"
        parseKind ("duration":"drop":ws) = KindDuration {subKind=parseKind ws, dropSubtrack=True}
        parseKind ("duration":ws) = KindDuration {subKind=parseKind ws, dropSubtrack=False}
        parseKind (('w':'i':'t':'h':'i':'n':'[':sep:"]"):ws)
            = KindWithin {subKind=parseKind ws, mapName = fst . S.break (==sep)}
        parseKind ["none" ] = KindNone
        parseKind ("none":_) = error "none requires no arguments"
        parseKind ["unspecified" ] = KindUnspecified
        parseKind ("unspecified":_)= error "unspecified requires no arguments"
        parseKind ws = error ("Unknown diagram kind " ++ unwords ws)
        defaultKindMinus = parseKind0 $ words $ single "default kind" "-dk" "unspecified"
        defaultKindsPlus = map (parseKind0 . words . head) $ getArg "+dk" 1 args
        parseStyle "stacked" = BarsStacked
        parseStyle "clustered" = BarsClustered
        parseSubtrackStyle "stacked" = SumStacked
        parseSubtrackStyle "overlayed" = SumOverlayed
-- Collect the arity-sized argument groups following each occurrence of the
-- given flag, e.g.:
-- getArg "-a" 2 ["-b", "1", "-a", "2", "q", "r", "-c", "3", "-a", "x"] =
-- [["2", "q"], ["x"]]
getArg :: String -> Int -> [String] -> [[String]]
getArg name arity args = concatMap pick (tails args)
  where
    pick (flag : rest) | flag == name = [take arity rest]
    pick _ = []
| jkff/timeplot | Tools/TimePlot/Conf.hs | bsd-3-clause | 10,055 | 0 | 20 | 3,083 | 2,745 | 1,500 | 1,245 | 156 | 52 |
--
-- @file
--
-- @brief Translates the parsed intermediate specifications into a compilable Haskell program using our EDSL
--
-- @copyright BSD License (see LICENSE.md or https://www.libelektra.org)
--
module Elektra.SpecTranslator (KeySpecification (..), translateSpecifications) where
import Elektra.Specifications
import Data.Char (isAlphaNum)
import Data.Map (Map)
import Data.Maybe (catMaybes, isJust, fromMaybe)
import Data.List (sortBy)
import Data.Function (on)
import Unsafe.Coerce
import Language.Haskell.Exts.Build
import Language.Haskell.Exts.Syntax
import Language.Haskell.Exts.Parser
import Language.Haskell.Exts.Extension
import qualified Data.Map.Strict as M
-- | Maps a type specification's name to its full specification.
type FunctionMap = Map TypeName TypeSpecification

-- | Translate the parsed specifications into a single Haskell module.
-- Type specifications without an implementation and key specifications with
-- an empty path are filtered out before translation.
translateSpecifications :: [TypeSpecification] -> [KeySpecification] -> Module ()
translateSpecifications ts ks = mkModule $ concatMap translateTypeSpecification fts ++ concatMap (translateKeySpecification functions) filteredKeyDefinitions
  where
    fts = filter (isJust . implementation) ts
    -- Lookup table used to resolve key-spec function candidates by name.
    functions = M.fromList [(tySpecName t, t) | t <- fts]
    filteredKeyDefinitions = filter (not . null . path) ks
-- | Translate a single type specification into declarations: an optional
-- type signature plus the parsed implementation (when present).
translateTypeSpecification :: TypeSpecification -> [Decl ()]
translateTypeSpecification t = catMaybes [typeSig, impl <$> implementation t]
  where
    typeSig = typeSig' <$> signature t
    -- Parse each implementation line as a declaration and merge the FunBinds
    -- into one. NOTE(review): foldl1 is partial (empty implementation list
    -- would crash) and unsafeCoerce assumes every parsed Decl is a FunBind —
    -- confirm upstream guarantees both.
    impl = let repack = foldl1 (\(FunBind () x) (FunBind _ y) -> FunBind () (x ++ y))
               parseMode = defaultParseMode { parseFilename = renamedTySpecName t, extensions = [EnableExtension DataKinds]}
           in repack . map (unsafeCoerce . fromParseResult . parseDeclWithMode parseMode)
    -- Chain the parameter types with (->); foldr1 requires >= 1 parameter.
    funTypes = foldr1 (TyFun ()) . map convertRegexTypeParameter
    constraint [] = Nothing
    constraint c = Just $ CxTuple () (map asst c)
    asst (RegexConstraint a p) = AppA () (name a) [convertRegexType p]
    typeSig' (TypeSignature c p) = TypeSig () [name . pathToDeclName $ renamedTySpecName t] $ TyForall () Nothing (constraint c) (funTypes p)

-- | Drop the parameter role and convert the underlying regex type.
convertRegexTypeParameter :: RegexTypeParam -> Type ()
convertRegexTypeParameter (RegexTypeParam r _) = convertRegexType r

-- | Convert a parsed regex type to a haskell-src-exts type: applications
-- become TyApp, literal regexes become promoted string literals (DataKinds),
-- and type variables stay variables.
convertRegexType :: RegexType -> Type ()
convertRegexType (RegexTypeApp a b) = TyApp () (convertRegexType a) (convertRegexType b)
convertRegexType (Regex r) = TyPromoted () (PromotedString () r r)
convertRegexType (RegexType r) = TyVar () (name r)
-- | Translate one key specification into a single value binding that applies
-- the key's resolvable function candidates (sorted by descending 'order') to
-- the raw, regex-typed key constant.
translateKeySpecification :: FunctionMap -> KeySpecification -> [Decl ()]
translateKeySpecification f k = [specTranslation]
  where
    -- The Key constructor annotated with its regex type; ".*" if unknown.
    rawKeyTranslation = ExpTypeSig () e t
      where
        kt = ignoreEither $ keyType k
        ignoreEither (Right r) = r
        ignoreEither _ = ".*"
        e = Con () key
        t = TyCon () regex <-> TyPromoted () (PromotedString () kt kt)
    specTranslation = let specs = functionCandidates k
                          conv (t, v) = foldl (<=>) (Var () (translateUnqualPath $ renamedTySpecName t)) . catMaybes $ [translateFunctionParameter v]
                          -- Resolve each candidate's base name; unresolved
                          -- candidates are dropped via repack/catMaybes.
                          sigs = map (flip M.lookup f . functionBaseName . fncFun) specs
                          repack Nothing _ = Nothing
                          repack (Just a) b = Just (a, b)
                          transl = conv <$> sortBy (flip compare `on` (order . fst)) (catMaybes $ zipWith repack sigs specs)
                      in nameBind (specificationKeyName k) $ foldr (<=>) rawKeyTranslation transl
    -- A parameter is another key (by path), a regex literal (by string),
    -- or absent when both fields are empty.
    translateFunctionParameter v = case fncPath v of
      "" -> case fncStr v of
        "" -> Nothing
        _ -> let rgx = fncStr v
                 vr = Var () key
                 ty = TyCon () regex <-> TyPromoted () (PromotedString () rgx rgx)
             in Just $ ExpTypeSig () vr ty
      _ -> Just $ Var () (translateUnqualPath $ fncPath v)
    translateUnqualPath = translateUnqual . pathToDeclName
-- | Assemble the generated declarations into a module named
-- "TestSpecification" with DataKinds/NoImplicitPrelude enabled and plain
-- imports of the regex machinery and GHC.TypeLits.
mkModule :: [Decl ()] -> Module ()
mkModule = Module ()
  (Just $
    ModuleHead () (ModuleName () "TestSpecification") Nothing Nothing)
  [LanguagePragma () [name "DataKinds", name "NoImplicitPrelude"]]
  [ImportDecl {importAnn = (),
               importModule = ModuleName () "Elektra.RegexType",
               importQualified = False, importSrc = False, importSafe = False,
               importPkg = Nothing, importAs = Nothing, importSpecs = Nothing}
  ,ImportDecl {importAnn = (),
               importModule = ModuleName () "GHC.TypeLits",
               importQualified = False, importSrc = False, importSafe = False,
               importPkg = Nothing, importAs = Nothing, importSpecs = Nothing}]
-- | The effective name of a type specification: the explicit rename when
-- one is present, otherwise the original name.
renamedTySpecName :: TypeSpecification -> String
renamedTySpecName ts =
  case rename ts of
    Just renamed -> renamed
    Nothing      -> tySpecName ts
-- AST related utilities

-- | Unqualified name of the generated Key constructor.
key :: QName ()
key = translateUnqual "Key"

-- | Unqualified name of the generated Regex type constructor.
regex :: QName ()
regex = translateUnqual "Regex"

-- | Binding name for a key specification, derived from its path.
specificationKeyName :: KeySpecification -> Name ()
specificationKeyName = name . pathToDeclName . path

-- | Wrap a plain string as an unqualified haskell-src-exts name.
translateUnqual :: String -> QName ()
translateUnqual n = UnQual () (name n)
-- | Reduce an arbitrary key path to a valid declaration name by keeping
-- only its alphanumeric characters.
pathToDeclName :: String -> String
pathToDeclName p = [c | c <- p, isAlphaNum c]
-- | Left-associative shorthand for expression application ('App').
(<=>) :: Exp () -> Exp () -> Exp ()
a <=> b = App () a b
infixl 5 <=>

-- | Left-associative shorthand for type application ('TyApp').
(<->) :: Type () -> Type () -> Type ()
a <-> b = TyApp () a b
infixl 5 <->
| e1528532/libelektra | src/libs/typesystem/spectranslator/Elektra/SpecTranslator.hs | bsd-3-clause | 5,223 | 0 | 21 | 1,245 | 1,698 | 888 | 810 | 91 | 5 |
{-# LANGUAGE PatternGuards, LambdaCase #-}
module Development.NSIS.Optimise(optimise) where
import Development.NSIS.Type
import Data.Generics.Uniplate.Data
import Data.List
import Data.Maybe
-- before: secret = 1021, primes = 109
-- | Optimise an instruction stream by running peephole passes to a size
-- fixed point. The right-hand 'rep' (general passes, no 'Label 0') runs
-- first; the left-hand 'rep' then introduces the 'Label 0' shorthand and
-- cleans up labels it made dead.
optimise :: [NSIS] -> [NSIS]
optimise =
    -- allow Label 0
    rep (elimDeadLabel . useLabel0) .
    -- disallow Label 0
    rep (elimDeadLabel . elimAfterGoto . deadAssign . assignSwitch . dullGoto . knownCompare . elimLabeledGoto . elimDeadVar)
-- | Iterate a pass to a fixed point: apply it four times per round and keep
-- going while the instruction count still shrinks.
rep :: ([NSIS] -> [NSIS]) -> [NSIS] -> [NSIS]
rep f x = g (measure x) x
    where
        g n1 x1 = if n2 < n1 then g n2 x2 else x2
            where x2 = f $ f $ f $ f x1
                  n2 = measure x2

-- | Progress metric: total number of NSIS nodes anywhere in the tree.
measure x = length (universeBi x :: [NSIS])
-- | Rewrite a jump that targets the label defined on the very next line to
-- use the special 'Label 0' (fall-through) form, recursing into sub-blocks.
useLabel0 :: [NSIS] -> [NSIS]
useLabel0 = map (descendBi useLabel0) . f
    where
        f (x:Labeled next:xs)
            | null (children x :: [NSIS]) -- must not be a block with nested instructions
            = descendBi (\i -> if i == next then Label 0 else i) x : Labeled next : f xs
        f (x:xs) = x : f xs
        f [] = []
-- Label whose next statement is a goto: redirect jumps to it straight through to the final target
elimLabeledGoto :: [NSIS] -> [NSIS]
elimLabeledGoto x = transformBi f x
    where
        -- Never rewrite a label definition itself, only references to it.
        f (Labeled x) = Labeled x
        f x | null (children x) = descendBi moveBounce x
            | otherwise = x

        -- Follow one step of the bounce table, if this label bounces.
        moveBounce x = fromMaybe x $ lookup x bounce
        -- Pairs (from, to): a label immediately followed by a goto (or by
        -- another label) forwards to the successor's target.
        bounce = flip concatMap (universe x) $ \case
            Labeled x:Goto y:_ -> [(x,y)]
            Labeled x:Labeled y:_ -> [(x,y)]
            _ -> []
-- Delete variables which are only assigned, never read from
elimDeadVar :: [NSIS] -> [NSIS]
elimDeadVar x = transform f x
where
f (Assign x _:xs) | x `elem` unused = xs
f xs = xs
unused = nub assign \\ nub used
used = every \\ assign
every = universeBi x
assign = [x | Assign x _ <- universeBi x]
-- | Instructions that may transfer control to a label.
jumpy :: NSIS -> Bool
jumpy = \case
    Goto{}         -> True
    StrCmpS{}      -> True
    IntCmp{}       -> True
    IfErrors{}     -> True
    IfFileExists{} -> True
    MessageBox{}   -> True
    _              -> False
-- Eliminate any code after a goto, until a label
elimAfterGoto :: [NSIS] -> [NSIS]
elimAfterGoto x = transformBi f x
    where
        f (x:xs) | jumpy x = x : g xs
        f x = x
        -- Drop instructions up to the next label.
        -- NOTE(review): this treats every 'jumpy' instruction as one that
        -- never falls through to the next statement — confirm that holds
        -- for the conditional forms (IntCmp, MessageBox, ...).
        g (Labeled x:xs) = Labeled x:xs
        g (x:xs) = g xs
        g x = x
-- Be careful to neither introduce or remove label based errors
-- | Remove label definitions that no instruction jumps to.
elimDeadLabel :: [NSIS] -> [NSIS]
elimDeadLabel x = transform f x
    where
        f (Labeled x:xs) | x `elem` unused = xs
        f xs = xs
        -- Multiset arithmetic mirroring elimDeadVar: a label is unused when
        -- its only occurrences are its definitions.
        unused = nub label \\ nub gotos
        gotos = every \\ label
        every = universeBi x
        label = [x | Labeled x <- universeBi x]
-- | Remove a goto that jumps to the label defined immediately afterwards:
-- control would fall through to it anyway.
dullGoto :: [NSIS] -> [NSIS]
dullGoto = transform $ \case
    Goto from : Labeled to : rest | from == to -> Labeled to : rest
    other -> other
-- A tricky one! Comparison after jump
-- | Resolve string comparisons whose left operand was just assigned a value
-- that statically decides the outcome, replacing them with direct gotos.
knownCompare :: [NSIS] -> [NSIS]
knownCompare x = transform f x
    where
        -- Comparison directly after the assignment: decide it now.
        f (Assign var val : StrCmpS a b yes no : xs)
            | a == [Var_ var], Just eq <- isEqual b val
            = Assign var val : Goto (if eq then yes else no) : xs
        -- grows, but only a finite amount
        f (Assign var val : Labeled l : StrCmpS a b yes no : xs)
            | a == [Var_ var], Just eq <- isEqual b val
            = Assign var val : Goto (if eq then yes else no) : Labeled l : StrCmpS a b yes no : xs
        -- Assignment followed by a jump: retarget any label that lands on a
        -- comparison we can already decide, jumping straight to its outcome.
        f (Assign var val : c : xs) | jumpy c = Assign var val : transformBi g c : xs
            where
                g l | Just (StrCmpS a b yes no) <- lookup l cmps
                    , a == [Var_ var], Just eq <- isEqual b val
                    = if eq then yes else no
                g l = l
        f x = x

        -- Every comparison that sits directly behind a label.
        cmps = [(l,cmp) | Labeled l : cmp@StrCmpS{} : _ <- universeBi x]
-- | Static equality of two values: syntactically equal values are equal,
-- two distinct fully-literal values are unequal, anything else is unknown.
isEqual :: Val -> Val -> Maybe Bool
isEqual a b
    | a == b = Just True
    | fullyLiteral a && fullyLiteral b = Just False
    | otherwise = Nothing
    where
        fullyLiteral = all isLiteral
        isLiteral Literal{} = True
        isLiteral _ = False
-- | Let an IntOp write directly into the variable its result is copied to.
assignSwitch :: [NSIS] -> [NSIS]
assignSwitch = transform f
    where
        -- this rule just switches the assignment, back and forth, ad infinitum
        -- not very principled!
        f (IntOp out1 a b c : Assign other ([Var_ out2]) : xs)
            | out1 == out2
            = IntOp other a b c : Assign out1 ([Var_ other]) : xs
        f x = x
-- | Remove assignments whose target is reassigned before any possible read.
deadAssign :: [NSIS] -> [NSIS]
deadAssign = transform f
    where
        f (Assign v x:xs) | isDead v xs = xs
        f xs = xs

        -- Labels are skipped; a reassignment whose RHS does not mention the
        -- variable kills it; any other instruction conservatively counts as
        -- a potential read (isDead = False).
        isDead v (Labeled _:xs) = isDead v xs
        isDead v (Assign v2 x:xs) = v `notElem` universeBi x && (v == v2 || isDead v xs)
        isDead v _ = False
| ndmitchell/nsis | src/Development/NSIS/Optimise.hs | bsd-3-clause | 4,668 | 0 | 15 | 1,566 | 1,915 | 963 | 952 | 102 | 8 |
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# OPTIONS_GHC -fplugin Brisk.Plugin #-}
{-# OPTIONS_GHC -fplugin-opt Brisk.Plugin:main #-}
module Scratch where
import Control.Distributed.Process
import Data.Binary
import Data.Typeable
import GHC.Generics (Generic)
-- | Ping-pong protocol messages; each carries the sender's pid so the
-- receiver knows where to reply.
data PingMessage = Ping ProcessId | Pong ProcessId
  deriving (Typeable, Generic)

-- Serialisation instance (via Generic) needed to send the message.
instance Binary PingMessage
-- | Answer a 'Ping' with a 'Pong' carrying our own pid; on any other
-- message, wait for the next one and try again.
pingPong :: ProcessId -> PingMessage -> Process ()
pingPong self (Ping sender) = send sender (Pong self)
pingPong self _             = expect >>= pingPong self
-- | Wait for the first message, then enter the ping-pong loop with our pid.
main :: Process ()
main = do me <- getSelfPid
          msg <- expect
          pingPong me msg
| abakst/brisk-prelude | examples/data00.hs | bsd-3-clause | 737 | 0 | 8 | 165 | 167 | 89 | 78 | 21 | 1 |
{-# OPTIONS_GHC -fplugin Brisk.Plugin #-}
module LookAtAnnotations where
import AnnotateMe
-- | Re-exposes the annotated value defined in 'AnnotateMe'.
foo :: Int
foo = AnnotateMe.x
| abakst/brisk-prelude | examples/LookAtAnnotations.hs | bsd-3-clause | 123 | 0 | 5 | 18 | 20 | 13 | 7 | 5 | 1 |
module GPP where
import Data.Generics.Strafunski.StrategyLib.StrategyLib
import Text.PrettyPrint.HughesPJ
import Control.Monad.Identity
-- | The type of generic pretty-printers (universally quantified).
type GPP = forall a . Term a => a -> Doc

-- | The type of generic pretty-printers (as TU strategy)
type TUDoc = TU Doc Identity

-- | Class of pre-fix-pointed pretty-printers (overloaded).
-- The GPP argument is the generic printer to use at recursive positions.
class PP a where
  pp :: GPP -> a -> Doc

-- | Type of updatable pretty-printers.
type UPP = GPP -> TUDoc
-- | Helper function for pretty-printing lists.
gppList :: Term a => GPP -> [a] -> Doc
gppList gpp xs
= sep (map gpp xs)
-- | Helper function for pretty-printing separator lists.
gppListSep :: (Term sep, Term a) => GPP -> sep -> [a] -> Doc
gppListSep gpp sp xs
= sep (punctuate (gpp sp) (map gpp xs))
-- | Helper function for pretty-printing optionals.
gppMaybe :: Term a => GPP -> Maybe a -> Doc
gppMaybe gpp xs
= maybe empty gpp xs
-- | Helper function for pretty-printing alternatives.
gppEither :: (Term a, Term b) => GPP -> Either a b -> Doc
gppEither gpp xs
= either gpp gpp xs
-- | Render with the fix-point of a pre-fix-point pretty-printer
renderFix :: Term a => UPP -> a -> String
renderFix gpp2tudoc
= render . gpp
where
gpp :: GPP
gpp = runIdentity . applyTU (gpp2tudoc gpp)
-- | Render with the fix-point of a pre-fix-point pretty-printer
renderFixMode :: Term a => Mode -> UPP -> a -> String
renderFixMode renderMode gpp2tudoc
= renderStyle style . gpp
where
style = defaultStyle { mode = renderMode }
defaultStyle = Style PageMode 100 1.5
gpp :: GPP
gpp = runIdentity . applyTU (gpp2tudoc gpp)
-- | For easy type annotation
type MonoPP a = a -> Doc

-- | For easy non-monadic adhoc: lift a pure query into 'adhocTU'.
adhocQ :: (Term t, Monad m) => TU a m -> (t -> a) -> TU a m
adhocQ f g = adhocTU f (return . g)
-- | Instance to deal with lexical sorts and literals,
-- which are all represented by String.
instance PP String where
  -- The recursive printer argument is ignored: strings render via 'text'.
  pp gpp = text
| jkoppel/Strafunski-Sdf2Haskell | generator/GPP.hs | bsd-3-clause | 1,997 | 0 | 10 | 431 | 551 | 294 | 257 | -1 | -1 |
--------------------------------------------------------------------------------
-- |
-- Module : Graphics.Rendering.OpenGL.Raw.EXT.Convolution
-- Copyright : (c) Sven Panne 2013
-- License : BSD3
--
-- Maintainer : Sven Panne <svenpanne@gmail.com>
-- Stability : stable
-- Portability : portable
--
-- All raw functions and tokens from the EXT_convolution extension, see
-- <http://www.opengl.org/registry/specs/EXT/convolution.txt>.
--
--------------------------------------------------------------------------------
module Graphics.Rendering.OpenGL.Raw.EXT.Convolution (
-- * Functions
glConvolutionFilter1D,
glConvolutionFilter2D,
glCopyConvolutionFilter1D,
glCopyConvolutionFilter2D,
glGetConvolutionFilter,
glSeparableFilter2D,
glGetSeparableFilter,
glConvolutionParameteri,
glConvolutionParameteriv,
glConvolutionParameterf,
glConvolutionParameterfv,
glGetConvolutionParameteriv,
glGetConvolutionParameterfv,
-- * Tokens
gl_CONVOLUTION_1D,
gl_CONVOLUTION_2D,
gl_SEPARABLE_2D,
gl_CONVOLUTION_BORDER_MODE,
gl_CONVOLUTION_FILTER_SCALE,
gl_CONVOLUTION_FILTER_BIAS,
gl_REDUCE,
gl_CONVOLUTION_FORMAT,
gl_CONVOLUTION_WIDTH,
gl_CONVOLUTION_HEIGHT,
gl_MAX_CONVOLUTION_WIDTH,
gl_MAX_CONVOLUTION_HEIGHT,
gl_POST_CONVOLUTION_RED_SCALE,
gl_POST_CONVOLUTION_GREEN_SCALE,
gl_POST_CONVOLUTION_BLUE_SCALE,
gl_POST_CONVOLUTION_ALPHA_SCALE,
gl_POST_CONVOLUTION_RED_BIAS,
gl_POST_CONVOLUTION_GREEN_BIAS,
gl_POST_CONVOLUTION_BLUE_BIAS,
gl_POST_CONVOLUTION_ALPHA_BIAS
) where
import Graphics.Rendering.OpenGL.Raw.ARB.Compatibility
| mfpi/OpenGLRaw | src/Graphics/Rendering/OpenGL/Raw/EXT/Convolution.hs | bsd-3-clause | 1,643 | 0 | 4 | 222 | 136 | 98 | 38 | 35 | 0 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE TypeFamilies #-}
module Check.Snmp where
import Check.Snmp.Snmp
import Control.Exception
import Data.ByteString (ByteString)
import Data.List hiding (lookup, stripPrefix)
import Data.Map.Strict (keys, lookup, singleton, unions)
import qualified Data.Map.Strict as Map
import Data.Maybe
import Data.Text (Text, unpack)
import qualified Data.Yaml as A
import Network.Protocol.Snmp hiding (Value, oid)
import Network.Snmp.Client hiding (oid)
import Prelude hiding (lookup)
import System.Cron
import Types hiding (config)
-- import Debug.Trace
-- | Base OID walked for interface data (presumably IF-MIB's ifEntry —
-- confirm against the MIB actually targeted).
interfacesOid :: ByteString
interfacesOid = "1.3.6.1.2.1.2.2.1"

-- | Base OID walked for storage data (presumably HOST-RESOURCES-MIB's
-- hrStorageEntry — confirm).
diskOid :: ByteString
diskOid = "1.3.6.1.2.1.25.2.3.1"
-- | Checker tagged with the name of the SNMP rule set to apply.
newtype Snmp = Snmp Text deriving Show

instance Checkable Snmp where
    describe _ = []
    -- One route per rule name. fromJust is safe here only because each key
    -- comes from 'keys vRules' itself.
    route (Rules vRules) (Snmp _) = unions $ map (\x -> singleton x $ doSnmp (fromJust $ lookup x vRules)) $ keys vRules
    routeCheck _ a@(Snmp x) = routeCheck' a x
-- | Run a bulk SNMP walk of the definition's OID against the check's host.
-- 'bracket' guarantees the client connection is closed even on exceptions.
doSnmp :: SnmpDefinition -> Check -> IO Complex
doSnmp vSnmpDefinition (Check _ (Hostname vHostname) _ _ _) = do
    r <- bracket (client ((config vSnmpDefinition) { hostname = unpack vHostname }))
                 close
                 (flip bulkwalk [oid vSnmpDefinition])
    return $ complex vSnmpDefinition r
-- | Shape a raw walk result into a JSON array of objects: couplas are
-- grouped by table row index and columns are renamed via the definition.
complex :: SnmpDefinition -> Suite -> A.Value
complex vSnmpDefinition (Suite vSuite) =
    let size = length (oid vSnmpDefinition)
        shorted = map rulesToObject $ splitByI $ map conv vSuite
        -- NOTE(review): the combining lambda '\[x] y' is partial; it relies
        -- on fromListWith always passing the newly inserted singleton as
        -- the first argument — confirm.
        splitByI = Map.elems . Map.fromListWith (\[x] y -> x:y)
        convertAlias x = to . replaceAlias x . convertFun x
        rulesToObject ((Just vRule, vValue):xs) = (simple vRule A..= (convertAlias vRule vValue :: A.Value)) : rulesToObject xs
        rulesToObject ((Nothing, _):xs) = rulesToObject xs
        rulesToObject [] = []
        -- Split the OID suffix after the base into column id (t) and row
        -- index (i); assumes at least two suffix components — verify.
        conv (Coupla o v) =
            let t:i:_ = drop size o
                convertRule = lookup t (names vSnmpDefinition)
            in (i, [(convertRule, v)])
    in A.array $ map A.object shorted
-- | Hand-rolled check used for interactive testing.
testSnmp :: Check
testSnmp = Check (CheckName "test") (Hostname "salt") (Cron daily) "snmp" $ A.object []

-- | SNMPv3 connection settings for interactive testing.
-- NOTE(review): hard-coded credentials; acceptable only as test fixtures.
testConf :: Config
testConf = ConfigV3 {hostname = "salt", port = "161", timeout = 5000000, sequrityName = "aes", authPass = "helloallhello", privPass = "helloallhello", sequrityLevel = AuthPriv, context = "", authType = SHA, privType = AES}
| chemist/fixmon | src/Check/Snmp.hs | bsd-3-clause | 2,724 | 0 | 15 | 729 | 828 | 455 | 373 | 54 | 3 |
{-# LANGUAGE GADTs #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
module Data.Derived where
import Database.Persist
import Database.Persist.TH
import Database.Persist.Sqlite
import Control.Monad.IO.Class (liftIO)
import Control.Monad.Except
import Control.Monad.Reader
import Data.Typeable
import Data.Data
import qualified Data.ByteString as B
import qualified Data.Text as T
import Data.Word
import Data.Time.Clock
-- | How one entry relates to another.
data LinkType = Related | Reply
    deriving (Show, Read, Eq, Ord, Data, Typeable, Enum)
-- Generates the PersistField instance (via Show/Read) so LinkType can be
-- stored directly in a database column.
derivePersistField "LinkType"
| edgarklerks/hforum | src/Data/Derived.hs | bsd-3-clause | 850 | 0 | 6 | 109 | 142 | 91 | 51 | 27 | 0 |
-- Copyright (c) 2012, Christoph Pohl BSD License (see
-- http://www.opensource.org/licenses/BSD-3-Clause)
-------------------------------------------------------------------------------
--
-- Project Euler Problem 2
--
-- Each new term in the Fibonacci sequence is generated by adding the previous
-- two terms. By starting with 1 and 2, the first 10 terms will be: 1, 2, 3, 5,
-- 8, 13, 21, 34, 55, 89, ... By considering the terms in the Fibonacci
-- sequence whose values do not exceed four million, find the sum of the
-- even-valued terms.
module Main where
-- | Print the answer to Project Euler problem 2.
main :: IO ()
main = putStrLn (show result)
-- | Sum of the even-valued terms of the problem's Fibonacci sequence
-- (1, 2, 3, 5, 8, ...) not exceeding four million. The sequence is built as
-- a shared lazy list, so each term is computed once instead of via the
-- exponential-time recomputation the naive per-index recursion caused.
result :: Integer
result = sum [t | t <- takeWhile (< 4000000) fibs, even t]
  where
    -- The classic self-referential Fibonacci stream, started at 1, 2.
    fibs = 1 : 2 : zipWith (+) fibs (tail fibs)
-- | The n-th term (1-indexed) of the problem's Fibonacci variant:
-- fibonacci 1 == 1, fibonacci 2 == 2, and every later term is the sum of
-- the two before it. Computed iteratively in O(n); the original naive
-- double recursion was exponential, which made summing terms up to four
-- million needlessly slow.
fibonacci :: Integer -> Integer
fibonacci n = go n 1 2
  where
    -- go k a b: a and b are the current and next terms while counting k
    -- down to the base cases.
    go 1 a _ = a
    go 2 _ b = b
    go k a b = go (k - 1) b (a + b)
| Psirus/euler | src/euler002.hs | bsd-3-clause | 795 | 0 | 12 | 139 | 145 | 79 | 66 | 8 | 1 |
{-# LANGUAGE OverloadedStrings, DataKinds #-}
module Main where
import Model
import Site
import Web.Spock.Safe
import Network.Wai.Middleware.Static
import Network.Wai.Middleware.RequestLogger
import qualified Data.Text as T
-- ?:
import Database.Persist.Sqlite hiding (get)
import Control.Monad
import Control.Monad.Logger
import Control.Monad.IO.Class
import Control.Monad.Trans.Resource
import Data.Text (Text, pack)
import qualified Data.Text.Lazy as TL
import Text.Blaze.Html.Renderer.Text (renderHtml)
import qualified Text.Blaze.Html5 as H
import qualified Text.Blaze.Bootstrap as H
import Web.Spock.Digestive
import qualified Text.Digestive as FF
import qualified Text.Digestive.Bootstrap as F
------------------------------------------
-- | Main:
------------------------------------------
-- | Create the sqlite connection pool, run schema migrations, then start
-- the web application.
main :: IO ()
main = do
  pool <- runNoLoggingT $ createSqlitePool "database.db" 5
  runNoLoggingT $ runSqlPool (runMigration migrateModel) pool
  app pool
------------------------------------------
-- | App:
------------------------------------------
-- | Spock application on port 4444: request logging and static-file
-- middleware, page routes, and the login/logout actions. The session
-- starts out as 'Nothing' (not logged in).
app :: ConnectionPool -> IO ()
app pool = runSpock 4444 $ spock (defaultSpockCfg Nothing (PCPool pool) ()) $ do
  -- | Middleware:
  middleware logStdoutDev
  middleware (staticPolicy (addBase "static"))
  -- | Routes:
  get root homePage
  get recordRoute recordPage
  get secretRoute secretPage
  -- | Auth:
  getpost "/login" loginAction
  get "logout" $ writeSession Nothing >> redirect "/"
------------------------------------------
-- | Routes:
------------------------------------------
-- | /record/<n> — captures a single Int path parameter.
recordRoute :: Path '[Int]
recordRoute = "record" <//> var

-- | /secret — no parameters.
secretRoute :: Path '[]
secretRoute = "secret"
------------------------------------------
-- | Pages:
------------------------------------------
-- | Static landing page.
homePage :: MonadIO m => ActionT m ()
homePage = blaze $ H.h1 "the home page"
-- | Render the requested record id as a plain-text response. The parameter
-- is named 'recordId' rather than 'id' so it no longer shadows Prelude.id.
recordPage :: MonadIO m => Int -> ActionT m ()
recordPage recordId = text $ pack $ show recordId
-- | The "secret" page. NOTE(review): the session check is currently
-- disabled (see the commented-out variants below), so this is public.
secretPage :: MonadIO m => ActionT m ()
secretPage = html "<h1>the secret page</h1>"
-- secretPage :: SpockAction conn Session st ()
-- secretPage = do
-- sess <- readSession
-- when (isNothing sess) $ redirect "/login"
-- site $ H.h1 "the secret"
-- secretPage :: SpockAction conn Session st ()
-- secretPage = do
-- records <- runSQL $ selectList [] []
-- site $
-- do H.h1 "records"
-- H.ul $
-- forM_ records $ \record ->
-- H.li $ H.toHtml (recordTitle $ entityVal record)
------------------------------------------
-- | Actions:
------------------------------------------
-- | Render and process the login form. On success the session id is
-- regenerated (session-fixation defence) and the username is stored in
-- the session before redirecting to /secret.
loginAction :: SpockAction conn Session st ()
loginAction = do
    let formView = F.renderForm loginFormSpec
    f <- runForm "loginForm" loginForm
    case f of
      (view, Nothing) ->
          -- Validation failed (or first GET): just show the form again.
          site $ formView view
      (view, Just loginReq) ->
          -- NOTE(review): credentials are hard-coded, and "assword" looks
          -- like a typo for "password" — confirm intent before shipping.
          if lrUser loginReq == "admin" && lrPassword loginReq == "assword"
          then do sessionRegenerateId
                  writeSession (Just $ lrUser loginReq)
                  redirect "/secret"
          else site $
               do H.alertBox H.BootAlertDanger "login failed"
                  formView view
-- | Name of the logged-in user.
type Username = T.Text
-- | Session payload: Just the user when logged in, Nothing otherwise.
type Session = Maybe Username

-- | Wrap page content in the shared site layout, passing the current
-- session along so the layout can reflect login state.
site :: H.Html -> SpockAction conn Session st ()
site ct =
    do sess <- readSession
       let sv = SiteView sess
       blaze $ siteView sv ct
------------------------------------------
-- | Forms:
------------------------------------------
-- | Credentials submitted through the login form.
data LoginRequest = LoginRequest
   { lrUser :: T.Text     -- entered username
   , lrPassword :: T.Text -- entered password (plain text)
   } deriving (Show)
-- | Bootstrap rendering description of the login form: POSTs to /login
-- with a username text input and a password input.
loginFormSpec :: F.FormMeta
loginFormSpec = F.FormMeta
    { F.fm_method = POST
    , F.fm_target = "/login"
    , F.fm_elements =
        [ F.FormElement "name" (Just "Username") F.InputText
        , F.FormElement "password" (Just "Password") F.InputPassword
        ]
    , F.fm_submitText = "Login"
    }
------------------------------------------
-- | Validation:
------------------------------------------
-- | Validate that the whitespace-stripped input length lies within the
-- inclusive (min, max) bounds, returning the stripped text on success.
-- The failure message now states the inclusive range; the previous
-- "longer than X / shorter than Y" wording contradicted the >= / <=
-- checks at both bounds.
minMaxLen :: (Int, Int) -> T.Text -> FF.Result H.Html T.Text
minMaxLen (minLen, maxLen) t =
    if len >= minLen && len <= maxLen
    then FF.Success stripped
    else FF.Error $ H.toHtml $
         "Must be between " ++ show minLen
         ++ " and " ++ show maxLen ++ " characters"
    where
      stripped = T.strip t
      len = T.length stripped
-- | Digestive-functors login form: username must be 3-12 characters,
-- password 6-20 (lengths measured after stripping whitespace).
loginForm :: Monad m => FF.Form H.Html m LoginRequest
loginForm = LoginRequest
    <$> "name" FF..: FF.validate (minMaxLen(3, 12)) (FF.text Nothing)
    <*> "password" FF..: FF.validate (minMaxLen(6, 20)) (FF.text Nothing)
------------------------------------------
-- | Units:
------------------------------------------
-- | Render a blaze Html value and send it as the (strict text) response.
blaze :: MonadIO m => H.Html -> ActionT m ()
blaze = html . TL.toStrict . renderHtml
-- runSQL :: (HasSpock m, SpockConn m ~ SqlBackend) => SqlPersistT (NoLoggingT (ResourceT IO)) a -> m a
-- runSQL action =
-- runQuery $ \conn ->
-- runResourceT $ runNoLoggingT $ runSqlConn action conn
-- checkSession :: SpockActionCtx ctx SqlBackend Session st ()
-- checkSession = do
-- sess <- readSession
-- -- when (isNothing sess) $ redirect "/login"
-- mUser <- getUserFromSession sess
-- case mUser of
-- Nothing -> text "Sorry, no access!"
-- Just user -> return (user :&: oldCtx)
------------------------------------------
-- | The End.
------------------------------------------
| denoptic/app | src/Main.hs | bsd-3-clause | 5,496 | 0 | 16 | 1,212 | 1,164 | 634 | 530 | 95 | 3 |
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
{-# LANGUAGE GADTs #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE OverloadedStrings #-}
module Duckling.Distance.RO.Rules
( rules
) where
import Data.HashMap.Strict (HashMap)
import Data.String
import Data.Text (Text)
import Prelude
import qualified Data.HashMap.Strict as HashMap
import qualified Data.Text as Text
import Duckling.Dimensions.Types
import Duckling.Distance.Helpers
import Duckling.Regex.Types
import Duckling.Types
import qualified Duckling.Distance.Types as TDistance
-- | Romanian unit spellings and abbreviations mapped to distance units.
-- Lookups are done on the lowercased regex match (see the rules below).
unitMap :: HashMap Text TDistance.Unit
unitMap = HashMap.fromList
  [ ( "cm", TDistance.Centimetre )
  , ( "centimetri", TDistance.Centimetre )
  , ( "centimetru", TDistance.Centimetre )
  , ( "picior", TDistance.Foot )
  , ( "picioare", TDistance.Foot )
  , ( "inch", TDistance.Inch )
  , ( "inchi", TDistance.Inch )
  , ( "inci", TDistance.Inch )
  , ( "km", TDistance.Kilometre )
  , ( "kilometri", TDistance.Kilometre )
  , ( "kilometru", TDistance.Kilometre )
  , ( "m", TDistance.Metre )
  , ( "metri", TDistance.Metre )
  , ( "metru", TDistance.Metre )
  , ( "mila", TDistance.Mile )
  , ( "milă", TDistance.Mile )
  , ( "mile", TDistance.Mile )
  , ( "y", TDistance.Yard )
  , ( "yar", TDistance.Yard )
  , ( "yard", TDistance.Yard )
  , ( "yarzi", TDistance.Yard )
  , ( "yd", TDistance.Yard )
  , ( "yzi", TDistance.Yard )
  ]
-- | Attach a unit to a latent distance, e.g. "3 km", "5 metri".
ruleLatentDistUnit :: Rule
ruleLatentDistUnit = Rule
  { name = "<latent dist> foot/inch/yard/meter/kilometer/centimeter"
  , pattern =
    [ dimension Distance
    , regex "(inc(hi?|i)|(centi|kilo)?metr[iu]|mil[eaă]|[ck]?m|picio(are|r)|y(ar)?(zi|d)?)"
    ]
  , prod = \case
      (Token Distance dd:
       Token RegexMatch (GroupMatch (match:_)):
       _) -> do
        -- Unknown spellings make the lookup — and thus the production —
        -- fail with Nothing.
        x <- HashMap.lookup (Text.toLower match) unitMap
        Just . Token Distance $ withUnit x dd
      _ -> Nothing
  }
-- | Same as 'ruleLatentDistUnit' but for the Romanian "de <unit>" phrasing,
-- e.g. "3 de kilometri". NOTE(review): the 'name' field duplicates the
-- previous rule's — confirm whether rule names must be unique.
ruleLatentDistDeUnit :: Rule
ruleLatentDistDeUnit = Rule
  { name = "<latent dist> foot/inch/yard/meter/kilometer/centimeter"
  , pattern =
    [ dimension Distance
    , regex "de (inc(hi?|i)|(centi|kilo)?metr[iu]|mil[eaă]|[ck]?m|picio(are|r)|y(ar)?(zi|d)?)"
    ]
  , prod = \case
      (Token Distance dd:
       Token RegexMatch (GroupMatch (match:_)):
       _) -> do
        x <- HashMap.lookup (Text.toLower match) unitMap
        Just . Token Distance $ withUnit x dd
      _ -> Nothing
  }
-- | All Romanian distance rules exported by this module.
rules :: [Rule]
rules =
  [ ruleLatentDistUnit
  , ruleLatentDistDeUnit
  ]
| facebookincubator/duckling | Duckling/Distance/RO/Rules.hs | bsd-3-clause | 2,613 | 0 | 17 | 524 | 655 | 388 | 267 | 71 | 2 |
-- import the reflex-host library and reflex itself
import Reflex.Host.App
import Reflex
import Control.Concurrent (forkIO)
import Control.Monad (forever)
import Control.Monad.IO.Class (liftIO)
-- The application should be generic in the host monad that is used
-- | Echo stdin through the FRP network: a background thread feeds each
-- line into an external event, and every occurrence is printed.
app :: MonadAppHost t m => m ()
app = do
  (inputEvent, inputFire) <- newExternalEvent
  -- getLine blocks, so fire the event from a dedicated forever-looping thread.
  liftIO . forkIO . forever $ getLine >>= inputFire
  performEvent_ $ fmap (liftIO . putStrLn) inputEvent
-- | Run the app on the Spider host implementation of reflex.
main :: IO ()
main = runSpiderHost $ hostApp app
| bennofs/reflex-host | example.hs | bsd-3-clause | 506 | 0 | 10 | 83 | 141 | 76 | 65 | 12 | 1 |
module Lib where
import Data.Binary
import qualified Data.ByteString.Lazy as L
import Data.Typeable (Typeable)
import GHC.Packing
-- | Serialise a value with packman, then encode the resulting packet with
-- Data.Binary. Uses '<$>' instead of the '>>= return .' anti-pattern.
wrapToBinary :: (Typeable a) => a -> IO L.ByteString
wrapToBinary a = encode <$> trySerialize a
-- | Decode a binary-encoded packet and deserialise it back to a value.
-- NOTE(review): 'decode' errors on malformed input rather than failing
-- gracefully — acceptable only for trusted payloads.
unwrapFromBinary :: (Typeable a) => L.ByteString -> IO a
unwrapFromBinary = deserialize . decode
-- | Serialise a value with packman and render the packet with 'show'.
-- Uses '<$>' instead of the '>>= return .' anti-pattern.
wrapToString :: (Typeable a) => a -> IO String
wrapToString a = show <$> trySerialize a
-- | Parse a 'show'n packet back and deserialise it.
-- NOTE(review): 'read' is partial — malformed input raises an error; callers
-- needing graceful failure should switch to 'readMaybe'.
unwrapFromString :: (Typeable a) => String -> IO a
unwrapFromString = deserialize . read
| michaxm/packman-exploration | src/Lib.hs | bsd-3-clause | 525 | 0 | 8 | 86 | 177 | 96 | 81 | 13 | 1 |
{-# LANGUAGE BangPatterns, CPP #-}
-- | A CSV parser. The parser defined here is RFC 4180 compliant, with
-- the following extensions:
--
-- * Empty lines are ignored.
--
-- * Non-escaped fields may contain any characters except
-- double-quotes, commas, carriage returns, and newlines.
--
-- * Escaped fields may contain any characters (but double-quotes
-- need to be escaped).
--
-- The functions in this module can be used to implement e.g. a
-- resumable parser that is fed input incrementally.
module Data.Csv.Parser
( DecodeOptions(..)
, defaultDecodeOptions
, csv
, csvWithHeader
, header
, record
, name
, field
) where
import Data.ByteString.Builder (byteString, toLazyByteString, charUtf8)
import Control.Applicative (optional)
import Data.Attoparsec.ByteString.Char8 (char, endOfInput)
import qualified Data.Attoparsec.ByteString as A
import qualified Data.Attoparsec.Lazy as AL
import qualified Data.Attoparsec.Zepto as Z
import qualified Data.ByteString as S
import qualified Data.ByteString.Unsafe as S
import qualified Data.Vector as V
import Data.Word (Word8)
import Data.Csv.Types
import Data.Csv.Util ((<$!>), blankLine, endOfLine, liftM2', cr, newline, doubleQuote, toStrict)
#if !MIN_VERSION_base(4,8,0)
import Control.Applicative ((<$>), (*>), (<*), pure)
import Data.Monoid (mappend, mempty)
#endif
-- | Options that controls how data is decoded. These options can be
-- used to e.g. decode tab-separated data instead of comma-separated
-- data.
--
-- To avoid having your program stop compiling when new fields are
-- added to 'DecodeOptions', create option records by overriding
-- values in 'defaultDecodeOptions'. Example:
--
-- > myOptions = defaultDecodeOptions {
-- > decDelimiter = fromIntegral (ord '\t')
-- > }
data DecodeOptions = DecodeOptions
    { -- | Field delimiter (stored as its raw byte value).
      decDelimiter :: {-# UNPACK #-} !Word8
    } deriving (Eq, Show)

-- | Decoding options for parsing CSV files.
defaultDecodeOptions :: DecodeOptions
defaultDecodeOptions = DecodeOptions
    { decDelimiter = 44  -- comma
    }
-- | Parse a CSV file that does not include a header.
csv :: DecodeOptions -> AL.Parser Csv
csv !opts = do
vals <- sepByEndOfLine1' (record (decDelimiter opts))
_ <- optional endOfLine
endOfInput
let nonEmpty = removeBlankLines vals
return $! V.fromList nonEmpty
{-# INLINE csv #-}
-- | Specialized version of 'sepBy1'' which is faster due to not
-- accepting an arbitrary separator.
sepByDelim1' :: AL.Parser a
             -> Word8  -- ^ Field delimiter
             -> AL.Parser [a]
sepByDelim1' p !delim = liftM2' (:) p loop
  where
    -- Peek before consuming so the byte is only eaten when it really is
    -- the field delimiter; anything else ends the sequence.
    loop = do
      mb <- A.peekWord8
      case mb of
        Just b | b == delim -> liftM2' (:) (A.anyWord8 *> p) loop
        _ -> pure []
{-# INLINE sepByDelim1' #-}
-- | Specialized version of 'sepBy1'' which is faster due to not
-- accepting an arbitrary separator.
sepByEndOfLine1' :: AL.Parser a
                 -> AL.Parser [a]
sepByEndOfLine1' p = liftM2' (:) p loop
  where
    -- Accept CRLF (CR must be followed by LF) or a bare LF between
    -- records; anything else ends the sequence.
    loop = do
      mb <- A.peekWord8
      case mb of
        Just b | b == cr ->
                   liftM2' (:) (A.anyWord8 *> A.word8 newline *> p) loop
               | b == newline ->
                   liftM2' (:) (A.anyWord8 *> p) loop
        _ -> pure []
{-# INLINE sepByEndOfLine1' #-}
-- | Parse a CSV file that includes a header.
csvWithHeader :: DecodeOptions -> AL.Parser (Header, V.Vector NamedRecord)
csvWithHeader !opts = do
!hdr <- header (decDelimiter opts)
vals <- map (toNamedRecord hdr) . removeBlankLines <$>
sepByEndOfLine1' (record (decDelimiter opts))
_ <- optional endOfLine
endOfInput
let !v = V.fromList vals
return (hdr, v)
-- | Parse a header, including the terminating line separator.
header :: Word8  -- ^ Field delimiter
       -> AL.Parser Header
header !delim = V.fromList <$!> name delim `sepByDelim1'` delim <* endOfLine

-- | Parse a header name. Header names have the same format as regular
-- 'field's.
name :: Word8 -> AL.Parser Name
name !delim = field delim
-- | Drop the records that came from blank lines in the input.
removeBlankLines :: [Record] -> [Record]
removeBlankLines rs = [r | r <- rs, not (blankLine r)]
-- | Parse a record, not including the terminating line separator. The
-- terminating line separator is not included as the last record in a
-- CSV file is allowed to not have a terminating line separator. You
-- most likely want to use the 'endOfLine' parser in combination with
-- this parser.
record :: Word8  -- ^ Field delimiter
       -> AL.Parser Record
record !delim = V.fromList <$!> field delim `sepByDelim1'` delim
{-# INLINE record #-}
-- | Parse a field. The field may be in either the escaped or
-- non-escaped format. The return value is unescaped.
field :: Word8 -> AL.Parser Field
field !delim = do
    mb <- A.peekWord8
    -- We purposely don't use <|> as we want to commit to the first
    -- choice if we see a double quote.
    case mb of
        Just b | b == doubleQuote -> escapedField
        _ -> unescapedField delim
{-# INLINE field #-}
-- | Parse a double-quoted field body; doubled quotes ("") are unescaped
-- to single quotes when present.
escapedField :: AL.Parser S.ByteString
escapedField = do
    _ <- dquote
    -- The scan state is 'True' if the previous character was a double
    -- quote. We need to drop a trailing double quote left by scan.
    s <- S.init <$> (A.scan False $ \s c -> if c == doubleQuote
                                            then Just (not s)
                                            else if s then Nothing
                                                 else Just False)
    -- Only run the unescape pass when an embedded quote actually occurs.
    if doubleQuote `S.elem` s
        then case Z.parse unescape s of
            Right r -> return r
            Left err -> fail err
        else return s
-- | Parse an unquoted field: a run of bytes up to (not including) a double
-- quote, line-ending byte, or the delimiter. May be empty.
unescapedField :: Word8 -> AL.Parser S.ByteString
unescapedField !delim = A.takeWhile (\ c -> c /= doubleQuote &&
                                            c /= newline &&
                                            c /= delim &&
                                            c /= cr)

-- | A single literal double-quote character.
dquote :: AL.Parser Char
dquote = char '"'
-- | Collapse every doubled quote ("") in an escaped field body to a single
-- quote. The result is accumulated in a Builder to avoid quadratic appends;
-- a lone quote is rejected as an invalid escape sequence.
unescape :: Z.Parser S.ByteString
unescape = (toStrict . toLazyByteString) <$!> go mempty where
  go acc = do
    -- Copy everything up to the next quote verbatim.
    h <- Z.takeWhile (/= doubleQuote)
    let rest = do
          start <- Z.take 2
          if (S.unsafeHead start == doubleQuote &&
              S.unsafeIndex start 1 == doubleQuote)
              then go (acc `mappend` byteString h `mappend` charUtf8 '"')
              else fail "invalid CSV escape sequence"
    done <- Z.atEnd
    if done
      then return (acc `mappend` byteString h)
      else rest
| hvr/cassava | src/Data/Csv/Parser.hs | bsd-3-clause | 6,584 | 0 | 20 | 1,792 | 1,413 | 764 | 649 | 120 | 5 |
{-# LANGUAGE PackageImports #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE QuasiQuotes #-}
module Ivory.Compile.C.Types where
import Language.C.Quote.GCC
import qualified "language-c-quote" Language.C.Syntax as C
import MonadLib (WriterT,Id,put)
import Data.Monoid
import Control.Applicative
import qualified Data.Set as S
--------------------------------------------------------------------------------
-- | A C preprocessor include directive, either from the system include
-- path or a project-local header.
data Include
  = SysInclude   FilePath  -- ^ @#include <foo.h>@
  | LocalInclude FilePath  -- ^ @#include "foo.h"@
  deriving (Show,Eq,Ord)
-- | Render an 'Include' as a verbatim C definition via the
-- quasiquoter's escape hatch (@$esc@), since language-c-quote has no
-- native representation for preprocessor directives.
includeDef :: Include -> C.Definition
includeDef incl = case incl of
  SysInclude file   -> [cedecl| $esc:("#include <" ++ file ++ ">") |]
  LocalInclude file -> [cedecl| $esc:("#include \"" ++ file ++ "\"") |]
-- | The set of headers a compilation unit depends on.
type Includes = S.Set Include

-- | Top-level C definitions, in emission order.
type Sources = [C.Definition]

-- | One generated C module: its name plus the includes and definitions
-- destined for the @.c@ file ('sources') and the @.h@ file ('headers').
data CompileUnits = CompileUnits
  { unitName :: String
  , sources  :: (Includes, Sources)
  , headers  :: (Includes, Sources)
  } deriving Show
-- | Units merge pointwise: names, sources and headers are each
-- appended, so 'mempty' is the empty (nameless) unit.
instance Monoid CompileUnits where
  mempty = CompileUnits mempty mempty mempty
  (CompileUnits n0 s0 h0) `mappend` (CompileUnits n1 s1 h1) =
    CompileUnits (n0 `mappend` n1)
                 (s0 `mappend` s1)
                 (h0 `mappend` h1)
--------------------------------------------------------------------------------
-- | Code-generation monad: a writer accumulating 'CompileUnits'.
newtype CompileM a = Compile
  { unCompile :: WriterT CompileUnits Id a }
  deriving (Functor, Monad, Applicative)

-- | A compilation action run only for its emitted output.
type Compile = CompileM ()
--------------------------------------------------------------------------------
-- | Emit one definition into the source (.c) stream.
putSrc :: C.Definition -> Compile
putSrc d =
  let unit = mempty { sources = (S.empty, [d]) }
  in  Compile (put unit)

-- | Record an include required by the source (.c) stream.
putSrcInc :: Include -> Compile
putSrcInc i =
  let unit = mempty { sources = (S.fromList [i], []) }
  in  Compile (put unit)

-- | Emit one definition into the header (.h) stream.
putHdrSrc :: C.Definition -> Compile
putHdrSrc d =
  let unit = mempty { headers = (S.empty, [d]) }
  in  Compile (put unit)

-- | Record an include required by the header (.h) stream.
putHdrInc :: Include -> Compile
putHdrInc i =
  let unit = mempty { headers = (S.fromList [i], []) }
  in  Compile (put unit)
--------------------------------------------------------------------------------
| Hodapp87/ivory | ivory-backend-c/src/Ivory/Compile/C/Types.hs | bsd-3-clause | 2,058 | 0 | 12 | 349 | 536 | 314 | 222 | 43 | 2 |
{-# LANGUAGE PatternSynonyms #-}
--------------------------------------------------------------------------------
-- |
-- Module : Graphics.GL.EXT.PalettedTexture
-- Copyright : (c) Sven Panne 2019
-- License : BSD3
--
-- Maintainer : Sven Panne <svenpanne@gmail.com>
-- Stability : stable
-- Portability : portable
--
--------------------------------------------------------------------------------
module Graphics.GL.EXT.PalettedTexture (
-- * Extension Support
glGetEXTPalettedTexture,
gl_EXT_paletted_texture,
-- * Enums
pattern GL_COLOR_INDEX12_EXT,
pattern GL_COLOR_INDEX16_EXT,
pattern GL_COLOR_INDEX1_EXT,
pattern GL_COLOR_INDEX2_EXT,
pattern GL_COLOR_INDEX4_EXT,
pattern GL_COLOR_INDEX8_EXT,
pattern GL_TEXTURE_INDEX_SIZE_EXT,
-- * Functions
glColorTableEXT,
glGetColorTableEXT,
glGetColorTableParameterfvEXT,
glGetColorTableParameterivEXT
) where
import Graphics.GL.ExtensionPredicates
import Graphics.GL.Tokens
import Graphics.GL.Functions
| haskell-opengl/OpenGLRaw | src/Graphics/GL/EXT/PalettedTexture.hs | bsd-3-clause | 1,005 | 0 | 5 | 134 | 96 | 67 | 29 | 18 | 0 |
{-# LANGUAGE PatternSynonyms #-}
--------------------------------------------------------------------------------
-- |
-- Module : Graphics.GL.EXT.TextureBufferObject
-- Copyright : (c) Sven Panne 2019
-- License : BSD3
--
-- Maintainer : Sven Panne <svenpanne@gmail.com>
-- Stability : stable
-- Portability : portable
--
--------------------------------------------------------------------------------
module Graphics.GL.EXT.TextureBufferObject (
-- * Extension Support
glGetEXTTextureBufferObject,
gl_EXT_texture_buffer_object,
-- * Enums
pattern GL_MAX_TEXTURE_BUFFER_SIZE_EXT,
pattern GL_TEXTURE_BINDING_BUFFER_EXT,
pattern GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT,
pattern GL_TEXTURE_BUFFER_EXT,
pattern GL_TEXTURE_BUFFER_FORMAT_EXT,
-- * Functions
glTexBufferEXT
) where
import Graphics.GL.ExtensionPredicates
import Graphics.GL.Tokens
import Graphics.GL.Functions
| haskell-opengl/OpenGLRaw | src/Graphics/GL/EXT/TextureBufferObject.hs | bsd-3-clause | 916 | 0 | 5 | 117 | 77 | 55 | 22 | 13 | 0 |
{-# LANGUAGE OverloadedStrings #-}
-- | Description : @ToJSON@ for Messages
--
-- This module contains the @ToJSON@ instance for @Message@.
module IHaskell.IPython.Message.Writer (ToJSON(..)) where
import Data.Aeson
import Data.Map (Map)
import Data.Text (Text, pack)
import Data.Monoid (mempty)
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString as B
import Data.Text.Encoding
import IHaskell.IPython.Types
-- Convert message bodies into JSON.
-- | Serialise reply- and publish-side message bodies to the JSON wire
-- format of the IPython messaging protocol.  The field names in the
-- objects below are protocol constants and must not change.
instance ToJSON Message where
  toJSON KernelInfoReply { versionList = vers, language = language } =
    object ["protocol_version" .= string "5.0" -- current protocol version, major and minor
           , "language_version" .= vers, "language" .= language]
  toJSON ExecuteReply { status = status, executionCounter = counter, pagerOutput = pager } =
    object
      [ "status" .= show status
      , "execution_count" .= counter
      , "payload" .=
        if null pager
          then []
          else map mkObj pager
      , "user_variables" .= emptyMap
      , "user_expressions" .= emptyMap
      ]
    where
      -- wrap each pager output as a "page" payload object
      mkObj o = object
        [ "source" .= string "page"
        , "line" .= Number 0
        , "data" .= object [displayDataToJson o]
        ]
  toJSON PublishStatus { executionState = executionState } =
    object ["execution_state" .= executionState]
  toJSON PublishStream { streamType = streamType, streamContent = content } =
    object ["data" .= content, "name" .= streamType]
  toJSON PublishDisplayData { source = src, displayData = datas } =
    object
      ["source" .= src, "metadata" .= object [], "data" .= object (map displayDataToJson datas)]
  toJSON PublishOutput { executionCount = execCount, reprText = reprText } =
    object
      [ "data" .= object ["text/plain" .= reprText]
      , "execution_count" .= execCount
      , "metadata" .= object []
      ]
  toJSON PublishInput { executionCount = execCount, inCode = code } =
    object ["execution_count" .= execCount, "code" .= code]
  toJSON (CompleteReply _ matches start end metadata status) =
    object
      [ "matches" .= matches
      , "cursor_start" .= start
      , "cursor_end" .= end
      , "metadata" .= metadata
      , "status" .= if status
                      then string "ok"
                      else "error"
      ]
  toJSON i@InspectReply{} =
    object
      [ "status" .= if inspectStatus i
                      then string "ok"
                      else "error"
      , "data" .= object (map displayDataToJson . inspectData $ i)
      , "metadata" .= object []
      , "found" .= inspectStatus i
      ]
  toJSON ShutdownReply { restartPending = restart } =
    object ["restart" .= restart]
  toJSON ClearOutput { wait = wait } =
    object ["wait" .= wait]
  toJSON RequestInput { inputPrompt = prompt } =
    object ["prompt" .= prompt]
  toJSON req@CommOpen{} =
    object ["comm_id" .= commUuid req, "target_name" .= commTargetName req, "data" .= commData req]
  toJSON req@CommData{} =
    object ["comm_id" .= commUuid req, "data" .= commData req]
  toJSON req@CommClose{} =
    object ["comm_id" .= commUuid req, "data" .= commData req]
  toJSON req@HistoryReply{} =
    object ["history" .= map tuplify (historyReply req)]
    where
      -- history items flatten to (session, line-number, input-or-output)
      tuplify (HistoryReplyElement sess linum res) = (sess, linum, case res of
        Left inp         -> toJSON inp
        Right (inp, out) -> toJSON out)
  -- catch-all: request-side messages are never serialised here
  toJSON body = error $ "Do not know how to convert to JSON for message " ++ show body
-- | Render an execution state as the protocol string
-- \"busy\", \"idle\" or \"starting\".
instance ToJSON ExecutionState where
  toJSON state = String $ case state of
    Busy     -> "busy"
    Idle     -> "idle"
    Starting -> "starting"
-- | Render a stream type as the protocol string \"stdin\" or \"stdout\".
instance ToJSON StreamType where
  toJSON stream = String $ case stream of
    Stdin  -> "stdin"
    Stdout -> "stdout"
-- | Convert a MIME type and value into a JSON dictionary pair, keyed
-- by the MIME type's rendered ('show') name.
displayDataToJson :: DisplayData -> (Text, Value)
displayDataToJson (DisplayData mimeType dataStr) =
  pack (show mimeType) .= String dataStr
----- Constants -----

-- | Empty JSON mapping used for unused reply fields.
emptyMap :: Map String String
emptyMap = mempty

-- NOTE(review): 'emptyList' and 'ints' appear unused in this module,
-- which only exports @ToJSON(..)@ -- confirm and consider removing.
emptyList :: [Int]
emptyList = []

-- | Type-restriction helper: fixes a list literal's type to @[Int]@.
ints :: [Int] -> [Int]
ints = id

-- | Type-restriction helper: fixes a literal's type to 'String'
-- (needed under OverloadedStrings).
string :: String -> String
string = id
| FranklinChen/IHaskell | ipython-kernel/src/IHaskell/IPython/Message/Writer.hs | mit | 4,460 | 0 | 13 | 1,268 | 1,184 | 632 | 552 | 95 | 1 |
{-# LANGUAGE OverloadedStrings, TemplateHaskell, QuasiQuotes, TypeFamilies, ViewPatterns, MultiParamTypeClasses #-}
module Main (main, resourcesApp) where
import Network.Wai.Middleware.Routes
import Network.Wai.Handler.Warp
import qualified Data.Text.Lazy as T
import Data.Text.Lazy (Text)
import System.Environment (getArgs)
import Data.Monoid (mempty, mappend)
import Control.Concurrent (runInUnboundThread)
data App = App
-- Route table for the benchmark application.  NOTE: everything between
-- the quasiquote brackets is parseRoutes DSL data consumed at compile
-- time by Template Haskell ('mkRoute'); handlers below are looked up
-- by the generated names (handleEchoR, handleNum0R, ...), so the table
-- contents must stay exactly as written.
mkRoute "App" [parseRoutes|
/echo EchoR:
/hello-world HelloWorldR
/plain/#Text/#Int PlainR
/deep/foo/bar/baz DeepR:
/0 Num0R
/1 Num1R
/2 Num2R
/3 Num3R
/4 Num4R
/5 Num5R
/6 Num6R
/7 Num7R
/8 Num8R
/9 Num9R
/10 Num10R
/11 Num11R
/12 Num12R
/13 Num13R
/14 Num14R
/15 Num15R
/16 Num16R
/17 Num17R
/18 Num18R
/19 Num19R
/20 Num20R
/21 Num21R
/22 Num22R
/23 Num23R
/24 Num24R
/25 Num25R
/26 Num26R
/27 Num27R
/28 Num28R
/29 Num29R
/30 Num30R
/31 Num31R
/32 Num32R
/33 Num33R
/34 Num34R
/35 Num35R
/36 Num36R
/37 Num37R
/38 Num38R
/39 Num39R
/40 Num40R
/41 Num41R
/42 Num42R
/43 Num43R
/44 Num44R
/45 Num45R
/46 Num46R
/47 Num47R
/48 Num48R
/49 Num49R
/50 Num50R
/51 Num51R
/52 Num52R
/53 Num53R
/54 Num54R
/55 Num55R
/56 Num56R
/57 Num57R
/58 Num58R
/59 Num59R
/60 Num60R
/61 Num61R
/62 Num62R
/63 Num63R
/64 Num64R
/65 Num65R
/66 Num66R
/67 Num67R
/68 Num68R
/69 Num69R
/70 Num70R
/71 Num71R
/72 Num72R
/73 Num73R
/74 Num74R
/75 Num75R
/76 Num76R
/77 Num77R
/78 Num78R
/79 Num79R
/80 Num80R
/81 Num81R
/82 Num82R
/83 Num83R
/84 Num84R
/85 Num85R
/86 Num86R
/87 Num87R
/88 Num88R
/89 Num89R
/90 Num90R
/91 Num91R
/92 Num92R
/93 Num93R
/94 Num94R
/95 Num95R
/96 Num96R
/97 Num97R
/98 Num98R
/99 Num99R
/100 Num100R
/after AfterR
|]
-- | Reply to the request with a plain-text body.
returnT :: Text -> Handler App
returnT body = runHandlerM (text body)
-- Handlers for the /deep/foo/bar/baz/N benchmark routes.  mkRoute
-- dispatches to these by name, so one definition per generated route
-- name is required; all reply with the constant body "deep".
handleNum0R :: Handler App; handleNum0R = returnT "deep"
handleNum1R :: Handler App; handleNum1R = returnT "deep"
handleNum2R :: Handler App; handleNum2R = returnT "deep"
handleNum3R :: Handler App; handleNum3R = returnT "deep"
handleNum4R :: Handler App; handleNum4R = returnT "deep"
handleNum5R :: Handler App; handleNum5R = returnT "deep"
handleNum6R :: Handler App; handleNum6R = returnT "deep"
handleNum7R :: Handler App; handleNum7R = returnT "deep"
handleNum8R :: Handler App; handleNum8R = returnT "deep"
handleNum9R :: Handler App; handleNum9R = returnT "deep"
handleNum10R :: Handler App; handleNum10R = returnT "deep"
handleNum11R :: Handler App; handleNum11R = returnT "deep"
handleNum12R :: Handler App; handleNum12R = returnT "deep"
handleNum13R :: Handler App; handleNum13R = returnT "deep"
handleNum14R :: Handler App; handleNum14R = returnT "deep"
handleNum15R :: Handler App; handleNum15R = returnT "deep"
handleNum16R :: Handler App; handleNum16R = returnT "deep"
handleNum17R :: Handler App; handleNum17R = returnT "deep"
handleNum18R :: Handler App; handleNum18R = returnT "deep"
handleNum19R :: Handler App; handleNum19R = returnT "deep"
handleNum20R :: Handler App; handleNum20R = returnT "deep"
handleNum21R :: Handler App; handleNum21R = returnT "deep"
handleNum22R :: Handler App; handleNum22R = returnT "deep"
handleNum23R :: Handler App; handleNum23R = returnT "deep"
handleNum24R :: Handler App; handleNum24R = returnT "deep"
handleNum25R :: Handler App; handleNum25R = returnT "deep"
handleNum26R :: Handler App; handleNum26R = returnT "deep"
handleNum27R :: Handler App; handleNum27R = returnT "deep"
handleNum28R :: Handler App; handleNum28R = returnT "deep"
handleNum29R :: Handler App; handleNum29R = returnT "deep"
handleNum30R :: Handler App; handleNum30R = returnT "deep"
handleNum31R :: Handler App; handleNum31R = returnT "deep"
handleNum32R :: Handler App; handleNum32R = returnT "deep"
handleNum33R :: Handler App; handleNum33R = returnT "deep"
handleNum34R :: Handler App; handleNum34R = returnT "deep"
handleNum35R :: Handler App; handleNum35R = returnT "deep"
handleNum36R :: Handler App; handleNum36R = returnT "deep"
handleNum37R :: Handler App; handleNum37R = returnT "deep"
handleNum38R :: Handler App; handleNum38R = returnT "deep"
handleNum39R :: Handler App; handleNum39R = returnT "deep"
handleNum40R :: Handler App; handleNum40R = returnT "deep"
handleNum41R :: Handler App; handleNum41R = returnT "deep"
handleNum42R :: Handler App; handleNum42R = returnT "deep"
handleNum43R :: Handler App; handleNum43R = returnT "deep"
handleNum44R :: Handler App; handleNum44R = returnT "deep"
handleNum45R :: Handler App; handleNum45R = returnT "deep"
handleNum46R :: Handler App; handleNum46R = returnT "deep"
handleNum47R :: Handler App; handleNum47R = returnT "deep"
handleNum48R :: Handler App; handleNum48R = returnT "deep"
handleNum49R :: Handler App; handleNum49R = returnT "deep"
handleNum50R :: Handler App; handleNum50R = returnT "deep"
handleNum51R :: Handler App; handleNum51R = returnT "deep"
handleNum52R :: Handler App; handleNum52R = returnT "deep"
handleNum53R :: Handler App; handleNum53R = returnT "deep"
handleNum54R :: Handler App; handleNum54R = returnT "deep"
handleNum55R :: Handler App; handleNum55R = returnT "deep"
handleNum56R :: Handler App; handleNum56R = returnT "deep"
handleNum57R :: Handler App; handleNum57R = returnT "deep"
handleNum58R :: Handler App; handleNum58R = returnT "deep"
handleNum59R :: Handler App; handleNum59R = returnT "deep"
handleNum60R :: Handler App; handleNum60R = returnT "deep"
handleNum61R :: Handler App; handleNum61R = returnT "deep"
handleNum62R :: Handler App; handleNum62R = returnT "deep"
handleNum63R :: Handler App; handleNum63R = returnT "deep"
handleNum64R :: Handler App; handleNum64R = returnT "deep"
handleNum65R :: Handler App; handleNum65R = returnT "deep"
handleNum66R :: Handler App; handleNum66R = returnT "deep"
handleNum67R :: Handler App; handleNum67R = returnT "deep"
handleNum68R :: Handler App; handleNum68R = returnT "deep"
handleNum69R :: Handler App; handleNum69R = returnT "deep"
handleNum70R :: Handler App; handleNum70R = returnT "deep"
handleNum71R :: Handler App; handleNum71R = returnT "deep"
handleNum72R :: Handler App; handleNum72R = returnT "deep"
handleNum73R :: Handler App; handleNum73R = returnT "deep"
handleNum74R :: Handler App; handleNum74R = returnT "deep"
handleNum75R :: Handler App; handleNum75R = returnT "deep"
handleNum76R :: Handler App; handleNum76R = returnT "deep"
handleNum77R :: Handler App; handleNum77R = returnT "deep"
handleNum78R :: Handler App; handleNum78R = returnT "deep"
handleNum79R :: Handler App; handleNum79R = returnT "deep"
handleNum80R :: Handler App; handleNum80R = returnT "deep"
handleNum81R :: Handler App; handleNum81R = returnT "deep"
handleNum82R :: Handler App; handleNum82R = returnT "deep"
handleNum83R :: Handler App; handleNum83R = returnT "deep"
handleNum84R :: Handler App; handleNum84R = returnT "deep"
handleNum85R :: Handler App; handleNum85R = returnT "deep"
handleNum86R :: Handler App; handleNum86R = returnT "deep"
handleNum87R :: Handler App; handleNum87R = returnT "deep"
handleNum88R :: Handler App; handleNum88R = returnT "deep"
handleNum89R :: Handler App; handleNum89R = returnT "deep"
handleNum90R :: Handler App; handleNum90R = returnT "deep"
handleNum91R :: Handler App; handleNum91R = returnT "deep"
handleNum92R :: Handler App; handleNum92R = returnT "deep"
handleNum93R :: Handler App; handleNum93R = returnT "deep"
handleNum94R :: Handler App; handleNum94R = returnT "deep"
handleNum95R :: Handler App; handleNum95R = returnT "deep"
handleNum96R :: Handler App; handleNum96R = returnT "deep"
handleNum97R :: Handler App; handleNum97R = returnT "deep"
handleNum98R :: Handler App; handleNum98R = returnT "deep"
handleNum99R :: Handler App; handleNum99R = returnT "deep"
handleNum100R :: Handler App; handleNum100R = returnT "deep"
-- | Responds with the constant plain-text body \"Hello World\".
handleHelloWorldR :: Handler App
handleHelloWorldR = returnT "Hello World"

-- | Responds with the constant plain-text body \"after\".
handleAfterR :: Handler App
handleAfterR = returnT "after"
-- | Respond with the path segment @p@ repeated @i@ times.
-- NOTE(review): the hand-rolled 'loop' concatenates lazy 'Text' with
-- 'mappend'; 'T.replicate' would be the library form, but the explicit
-- loop may be intentional for cross-framework benchmark parity --
-- confirm before changing.  A negative @i@ would loop forever.
handlePlainR :: Text -> Int -> Handler App
handlePlainR p i = returnT builder
  where
    builder =
      loop i
      where
        loop 0 = mempty
        loop 1 = p
        loop x = p `mappend` loop (x - 1)
-- | Read the port to listen on from the first command-line argument and
-- serve the benchmark app in an unbound thread.  Previously the partial
-- pattern @port:_ <- getArgs@ died with an opaque pattern-match failure
-- when no argument was given; now a usage message is printed instead.
main :: IO ()
main = do
    args <- getArgs
    case args of
      port:_ -> toWaiApp (route App) >>= runInUnboundThread . run (read port)
      []     -> error "usage: wai-routes PORT"
| philopon/apiary-benchmark | src/wai-routes.hs | mit | 8,531 | 0 | 12 | 1,621 | 2,005 | 1,067 | 938 | 129 | 3 |
{- |
Module : ./Modal/StatAna.hs
Copyright : (c) Christian Maeder, Uni Bremen 2004-2005
License : GPLv2 or higher, see LICENSE.txt
Maintainer : luecke@informatik.uni-bremen.de
Stability : provisional
Portability : portable
static analysis of modal logic parts
-}
module Modal.StatAna (basicModalAnalysis, minExpForm) where
import Modal.AS_Modal
import Modal.Print_AS ()
import Modal.ModalSign
import CASL.Sign
import CASL.MixfixParser
import CASL.StaticAna
import CASL.AS_Basic_CASL
import CASL.ShowMixfix
import CASL.Overload
import CASL.Quantification
import Common.AS_Annotation
import Common.GlobalAnnotations
import Common.Keywords
import Common.Lib.State
import Common.Id
import Common.Result
import Common.ExtSign
import qualified Common.Lib.MapSet as MapSet
import qualified Data.Map as Map
import qualified Data.Set as Set
import Data.List as List
import Data.Function
-- | Free variables of a modal formula are those of its body; the
-- modality itself binds nothing.
instance TermExtension M_FORMULA where
  freeVarsOfExt sign (BoxOrDiamond _ _ f _) = freeVars sign f
-- | Static-analysis entry point for ModalCASL basic specs, assembled
-- from the generic CASL analysis and the modal-specific item analyses.
basicModalAnalysis
  :: (BASIC_SPEC M_BASIC_ITEM M_SIG_ITEM M_FORMULA,
      Sign M_FORMULA ModalSign, GlobalAnnos)
  -> Result (BASIC_SPEC M_BASIC_ITEM M_SIG_ITEM M_FORMULA,
             ExtSign (Sign M_FORMULA ModalSign) Symbol,
             [Named (FORMULA M_FORMULA)])
basicModalAnalysis =
    basicAnalysis minExpForm ana_M_BASIC_ITEM ana_M_SIG_ITEM ana_Mix
-- | Mixfix-analysis record for ModalCASL: identifier collection,
-- parenthesising and resolution for the modal extension.
ana_Mix :: Mix M_BASIC_ITEM M_SIG_ITEM M_FORMULA ModalSign
ana_Mix = emptyMix
    { getSigIds = ids_M_SIG_ITEM
    , putParen = mapM_FORMULA
    , mixResolve = resolveM_FORMULA
    }
-- rigid ops will also be part of the CASL signature
-- | Collect the identifiers introduced by rigid op/pred items, for
-- mixfix-rule construction.
ids_M_SIG_ITEM :: M_SIG_ITEM -> IdSets
ids_M_SIG_ITEM si = let e = Set.empty in case si of
    Rigid_op_items _ al _ ->
        (unite2 $ map (ids_OP_ITEM . item) al, e)
    Rigid_pred_items _ al _ ->
        ((e, e), Set.unions $ map (ids_PRED_ITEM . item) al)
-- | Push the parenthesising pass through a term modality; simple
-- modalities are left untouched.
mapMODALITY :: MODALITY -> MODALITY
mapMODALITY (Term_mod t) = Term_mod (mapTerm mapM_FORMULA t)
mapMODALITY other        = other
-- | Parenthesise a modal formula: recurse into both the modality and
-- the formula body.
mapM_FORMULA :: M_FORMULA -> M_FORMULA
mapM_FORMULA (BoxOrDiamond b m f ps) = BoxOrDiamond b m' f' ps
  where
    m' = mapMODALITY m
    f' = mapFormula mapM_FORMULA f
-- | Mixfix resolution inside a term modality; simple modalities need
-- no resolution.
resolveMODALITY :: MixResolve MODALITY
resolveMODALITY ga ids m = case m of
    Term_mod t -> fmap Term_mod $ resolveMixTrm mapM_FORMULA
        resolveM_FORMULA ga ids t
    _ -> return m
-- | Mixfix resolution for modal formulas: resolve the modality and the
-- body of a box/diamond formula.
resolveM_FORMULA :: MixResolve M_FORMULA
resolveM_FORMULA ga ids cf = case cf of
   BoxOrDiamond b m f ps -> do
       nm <- resolveMODALITY ga ids m
       nf <- resolveMixFrm mapM_FORMULA resolveM_FORMULA ga ids f
       return $ BoxOrDiamond b nm nf ps
-- | Minimal expansion (overload resolution) of modal formulas.  A bare
-- token is accepted as a simple modality when it is declared (or is
-- the empty modality); otherwise a term modality is type-checked and
-- its sort (or a supersort) must be a declared term-modality sort.
minExpForm :: Min M_FORMULA ModalSign
minExpForm s form =
    let minMod md ps = case md of
            -- normalise simple modalities to the term case first
            Simple_mod i -> minMod (Term_mod (Mixfix_token i)) ps
            Term_mod t -> let
              r = do
                t2 <- oneExpTerm minExpForm s t
                let srt = sortOfTerm t2
                    trm = Term_mod t2
                    supers = supersortsOf srt s
                -- the term's sort or one of its supersorts must be a
                -- declared term-modality sort
                if Set.null $ Set.intersection
                      (Set.insert srt supers)
                      $ Map.keysSet $ termModies $ extendedInfo s
                  then Result [mkDiag Error
                         ("unknown term modality sort '"
                          ++ showId srt "' for term") t ]
                       $ Just trm
                  else return trm
              in case t of
                 Mixfix_token tm ->
                     if Map.member tm (modies $ extendedInfo s)
                        || tokStr tm == emptyS
                     then return $ Simple_mod tm
                     else Result
                          [mkDiag Error "unknown modality" tm]
                          $ Just $ Simple_mod tm
                 -- a nullary application of a plain token may still be
                 -- a declared simple modality
                 Application (Op_name (Id [tm] [] _)) [] _ ->
                     if Map.member tm (modies $ extendedInfo s)
                     then return $ Simple_mod tm
                     else r
                 _ -> r
    in case form of
       BoxOrDiamond b m f ps ->
           do nm <- minMod m ps
              f2 <- minExpFORMULA minExpForm s f
              return $ BoxOrDiamond b nm f2 ps
-- | Analyse rigid op/pred items with the plain CASL analysis;
-- 'Flexible' ops and preds are additionally recorded in the modal part
-- of the signature.
ana_M_SIG_ITEM :: Ana M_SIG_ITEM M_BASIC_ITEM M_SIG_ITEM M_FORMULA ModalSign
ana_M_SIG_ITEM mix mi =
    case mi of
    Rigid_op_items r al ps ->
        do ul <- mapM (ana_OP_ITEM minExpForm mix) al
           case r of
             Flexible -> mapM_ (\ aoi -> case item aoi of
                 Op_decl ops ty _ _ ->
                     mapM_ (updateExtInfo . addFlexOp (toOpType ty)) ops
                 Op_defn i par _ _ -> maybe (return ())
                     (\ ty -> updateExtInfo $ addFlexOp (toOpType ty) i)
                     $ headToType par) ul
             Rigid -> return ()
           return $ Rigid_op_items r ul ps
    Rigid_pred_items r al ps ->
        do ul <- mapM (ana_PRED_ITEM minExpForm mix) al
           case r of
             Flexible -> mapM_ (\ aoi -> case item aoi of
                 Pred_decl ops ty _ ->
                     mapM_ (updateExtInfo . addFlexPred (toPredType ty)) ops
                 Pred_defn i (Pred_head args _) _ _ ->
                     updateExtInfo $ addFlexPred
                         (PredType $ sortsOfArgs args) i ) ul
             Rigid -> return ()
           return $ Rigid_pred_items r ul ps
-- | Record a flexible operation in the modal signature part.
addFlexOp :: OpType -> Id -> ModalSign -> Result ModalSign
addFlexOp ty i m =
    let ops' = addOpTo i ty (flexOps m)
    in return m { flexOps = ops' }

-- | Record a flexible predicate in the modal signature part.
addFlexPred :: PredType -> Id -> ModalSign -> Result ModalSign
addFlexPred ty i m =
    let preds' = MapSet.insert i ty (flexPreds m)
    in return m { flexPreds = preds' }
-- | Analyse modality declarations.  Axioms attached to a declaration
-- are processed by 'ana_FORMULA': the merely resolved form is returned
-- in the item, while the fully analysed form is stored with the
-- modality in the signature.
ana_M_BASIC_ITEM
    :: Ana M_BASIC_ITEM M_BASIC_ITEM M_SIG_ITEM M_FORMULA ModalSign
ana_M_BASIC_ITEM mix bi = case bi of
  Simple_mod_decl al fs ps -> do
      mapM_ ((updateExtInfo . preAddModId) . item) al
      newFs <- mapAnM (ana_FORMULA mix) fs
      resFs <- mapAnM (return . fst) newFs
      anaFs <- mapAnM (return . snd) newFs
      mapM_ ((updateExtInfo . addModId anaFs) . item) al
      return $ Simple_mod_decl al resFs ps
  Term_mod_decl al fs ps -> do
      e <- get
      mapM_ ((updateExtInfo . preAddModSort e) . item) al
      newFs <- mapAnM (ana_FORMULA mix) fs
      resFs <- mapAnM (return . fst) newFs
      anaFs <- mapAnM (return . snd) newFs
      mapM_ ((updateExtInfo . addModSort anaFs) . item) al
      return $ Term_mod_decl al resFs ps
-- | Register a simple modality name; a repeated declaration is only
-- reported as a hint and leaves the signature unchanged.
preAddModId :: SIMPLE_ID -> ModalSign -> Result ModalSign
preAddModId i m =
    let ms = modies m in
    if Map.member i ms then
       Result [mkDiag Hint "repeated modality" i] $ Just m
    else return m { modies = Map.insert i [] ms }
-- | Attach the analysed axioms to an already-registered modality.
addModId :: [AnModFORM] -> SIMPLE_ID -> ModalSign -> Result ModalSign
addModId frms i m =
    let ms' = Map.insertWith List.union i frms (modies m)
    in return m { modies = ms' }
-- | Register a term-modality sort; a repeated declaration or a clash
-- with a sort missing from the signature is reported as a hint.
preAddModSort :: Sign M_FORMULA ModalSign -> SORT -> ModalSign
              -> Result ModalSign
preAddModSort e i m =
    let ms = termModies m
        ds = hasSort e i
    in if Map.member i ms || not (null ds) then
          Result (mkDiag Hint "repeated term modality" i : ds) $ Just m
       else return m { termModies = Map.insert i [] ms }
-- | Attach the analysed axioms to an already-registered term-modality
-- sort.
addModSort :: [AnModFORM] -> SORT -> ModalSign -> Result ModalSign
addModSort frms i m =
    let ms' = Map.insertWith List.union i frms (termModies m)
    in return m { termModies = ms' }
-- | Resolve and type-check a formula from a modality declaration.
-- Unknown tokens are temporarily registered as nullary predicates so
-- mixfix resolution succeeds; the predicate map is restored afterwards.
-- Returns the merely resolved and the fully analysed formula.
ana_FORMULA :: Mix M_BASIC_ITEM M_SIG_ITEM M_FORMULA ModalSign
    -> FORMULA M_FORMULA -> State (Sign M_FORMULA ModalSign)
       (FORMULA M_FORMULA, FORMULA M_FORMULA)
ana_FORMULA mix f = do
    let ps = map simpleIdToId $ Set.toList $ getFormPredToks f
    pm <- gets predMap
    mapM_ (addPred (emptyAnno ()) $ PredType []) ps
    newGa <- gets globAnnos
    let Result es m = resolveFormula mapM_FORMULA
                      resolveM_FORMULA newGa (mixRules mix) f
    addDiags es
    e <- get
    phi <- case m of
        Nothing -> return (f, f)
        Just r -> do
            n <- resultToState (minExpFORMULA minExpForm e) r
            return (r, n)
    e2 <- get
    -- restore the original predicate map, dropping the temporaries
    put e2 {predMap = pm}
    return phi
-- | Collect all tokens of a formula that may stand for (yet unknown)
-- nullary predicates, descending through all sub-formulas and terms.
getFormPredToks :: FORMULA M_FORMULA -> Set.Set Token
getFormPredToks frm = case frm of
    Quantification _ _ f _ -> getFormPredToks f
    Junction _ fs _ -> Set.unions $ map getFormPredToks fs
    Relation f1 _ f2 _ -> on Set.union getFormPredToks f1 f2
    Negation f _ -> getFormPredToks f
    Mixfix_formula (Mixfix_token t) -> Set.singleton t
    Mixfix_formula t -> getTermPredToks t
    ExtFORMULA (BoxOrDiamond _ _ f _) -> getFormPredToks f
    Predication _ ts _ -> Set.unions $ map getTermPredToks ts
    Definedness t _ -> getTermPredToks t
    Equation t1 _ t2 _ -> on Set.union getTermPredToks t1 t2
    Membership t _ _ -> getTermPredToks t
    _ -> Set.empty
-- | Collect candidate predicate tokens occurring inside a term
-- (companion to 'getFormPredToks').
getTermPredToks :: TERM M_FORMULA -> Set.Set Token
getTermPredToks trm = case trm of
    Application _ ts _ -> Set.unions $ map getTermPredToks ts
    Sorted_term t _ _ -> getTermPredToks t
    Cast t _ _ -> getTermPredToks t
    Conditional t1 f t2 _ -> Set.union (getTermPredToks t1) $
        Set.union (getFormPredToks f) $ getTermPredToks t2
    Mixfix_term ts -> Set.unions $ map getTermPredToks ts
    Mixfix_parenthesized ts _ -> Set.unions $ map getTermPredToks ts
    Mixfix_bracketed ts _ -> Set.unions $ map getTermPredToks ts
    Mixfix_braced ts _ -> Set.unions $ map getTermPredToks ts
    _ -> Set.empty
| spechub/Hets | Modal/StatAna.hs | gpl-2.0 | 9,729 | 0 | 26 | 3,215 | 3,000 | 1,450 | 1,550 | 213 | 12 |
{-# LANGUAGE BangPatterns, ScopedTypeVariables #-}
{-
- Qoropa -- Love Your Mail!
- Copyright © 2010 Ali Polatel
- Based in part upon XMonad which is:
- Copyright (c) 2007 Spencer Janssen
- Based in part upon gregorycollins/homepage which is:
- Copyright (C) 2009 Gregory Collins
-
- This file is part of the Qoropa mail reader. Qoropa is free software;
- you can redistribute it and/or modify it under the terms of the GNU General
- Public License version 2, as published by the Free Software Foundation.
-
- Qoropa is distributed in the hope that it will be useful, but WITHOUT ANY
- WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- Place, Suite 330, Boston, MA 02111-1307 USA
-
- Author: Ali Polatel <alip@exherbo.org>
-}
module Qoropa.Util
( beep
, expandTilde
, getQoropaDir
, relativeTime
, recompile
) where
import Prelude hiding (catch)

import Control.Applicative ((<$>))
import Control.Exception (catch, bracket, SomeException(..))
import Control.Monad (filterM)

import Data.List ((\\))
import Data.List (isSuffixOf)
import Data.Time
    ( TimeZone, UTCTime, NominalDiffTime
    , diffUTCTime, utcToLocalTime, formatTime
    , getCurrentTime, getCurrentTimeZone
    )
import Data.Time.Clock.POSIX (posixSecondsToUTCTime)

import Foreign.C.Types (CTime)

import System.Locale (defaultTimeLocale)
import System.Exit (ExitCode(..))
import System.IO (openFile, IOMode(..), hClose)
import System.Info (arch, os)
import System.FilePath.Posix ((</>))
import System.Process (runProcess, waitForProcess)
import System.Directory
    ( doesDirectoryExist
    , getDirectoryContents, getAppUserDataDirectory, getModificationTime
    , getHomeDirectory
    )
-- | Ring the terminal bell by emitting the ASCII BEL character on
-- standard output.
beep :: IO ()
beep = putStr "\a"
-- | Expand a leading @~/@ to the user's home directory.
-- NOTE(review): a bare @~@ and @~user@ forms are returned unchanged --
-- confirm this is the intended behaviour.
expandTilde :: FilePath -> IO FilePath
expandTilde ('~':'/':xs) = do
    home <- getHomeDirectory
    return $ home </> xs
expandTilde f = return f
-- | Per-user Qoropa data directory (e.g. @~\/.qoropa@ on Unix).
getQoropaDir :: IO FilePath
getQoropaDir = getAppUserDataDirectory "qoropa"
-- | Render the age of @oldTime@ relative to @curTime@ the way mail
-- clients do: minute counts within the last hour, a clock time for
-- today and yesterday, the weekday within the last five days, month
-- and day within the current year, and a full date beyond that.
humanReadableTimeDiff :: TimeZone -> UTCTime -> UTCTime -> String
humanReadableTimeDiff tz curTime oldTime =
    helper diff
  where
    diff = diffUTCTime curTime oldTime

    minutes :: NominalDiffTime -> Double
    minutes n = realToFrac $ n / 60

    hours :: NominalDiffTime -> Double
    hours n = (minutes n) / 60

    days :: NominalDiffTime -> Double
    days n = (hours n) / 24

    years :: NominalDiffTime -> Double
    years n = (days n) / 365

    -- truncate to a whole number for display
    i2s :: RealFrac a => a -> String
    i2s !n = show m
      where
        m :: Int
        m = truncate n

    old = utcToLocalTime tz oldTime
    today = formatTime defaultTimeLocale "Today %R" old
    yesterday = formatTime defaultTimeLocale "Yest. %R" old
    dayOfWeek = formatTime defaultTimeLocale "%a. %R" old
    thisYear = formatTime defaultTimeLocale "%B %d" old
    previousYears = formatTime defaultTimeLocale "%F" old

    helper !d | minutes d < 2 = "One min. ago"
              | minutes d < 60 = i2s (minutes d) ++ " mins. ago"
              | hours d < 24 = today
              | hours d < 48 = yesterday
              | days d < 5 = dayOfWeek
              | years d < 1 = thisYear
              | otherwise = previousYears
-- | Format a POSIX timestamp relative to the current time, in the
-- current time zone (see 'humanReadableTimeDiff').
relativeTime :: CTime -> IO String
relativeTime t = do
    tz <- getCurrentTimeZone
    now <- getCurrentTime
    return $ humanReadableTimeDiff tz now old
  where
    old = posixSecondsToUTCTime $ realToFrac t
-- | Recompile @~\/.qoropa\/qoropa.hs@ with GHC when the config or any
-- Haskell source under @~\/.qoropa\/lib@ is newer than the binary, or
-- when 'force' is set.  Compiler output goes to @qoropa.errors@.
-- Returns 'True' when no recompilation was needed or GHC succeeded.
--
-- Fix: 'isSource' previously compared whole file paths against the
-- extension list (@flip elem [".hs", ...]@), so library sources were
-- never detected and changes under lib\/ never triggered a rebuild.
-- It now tests the filename suffix.
recompile :: Bool -> IO Bool
recompile force = do
    dir <- getQoropaDir
    let name = "qoropa-" ++ arch ++ "-" ++ os
        bin  = dir </> name
        base = dir </> "qoropa"
        err  = base ++ ".errors"
        src  = base ++ ".hs"
        lib  = dir </> "lib"
    libTs <- mapM getModTime . filter isSource =<< allFiles lib
    srcT <- getModTime src
    binT <- getModTime bin
    -- 'getModTime' yields Nothing for missing files, and
    -- Nothing < Just _, so a missing binary always forces a rebuild.
    if force || any (binT <) (srcT : libTs)
        then do
            -- capture compiler diagnostics in the .errors file
            status <- bracket (openFile err WriteMode) hClose $ \h ->
                waitForProcess =<< runProcess "ghc" [ "--make"
                                                    , "qoropa.hs"
                                                    , "-i"
                                                    , "-ilib"
                                                    , "-fforce-recomp"
                                                    , "-v0"
                                                    , "-o", name
                                                    ] (Just dir)
                                      Nothing Nothing Nothing (Just h)
            return (status == ExitSuccess)
        else return True
  where
    getModTime f = catch (Just <$> getModificationTime f) (\(SomeException _) -> return Nothing)
    -- a Haskell source file, judged by its extension
    isSource f = any (`isSuffixOf` f) [".hs", ".lhs", ".hsc"]
    -- all files below t, recursively, skipping "." and ".."
    allFiles t = do
        let prep = map (t</>) . filter (`notElem` [".", ".."])
        cs <- prep <$> catch (getDirectoryContents t) (\(SomeException _) -> return [])
        ds <- filterM doesDirectoryExist cs
        concat . ((cs \\ ds):) <$> mapM allFiles ds
-- vim: set ft=haskell et ts=4 sts=4 sw=4 fdm=marker :
| beni55/qoropa | src/Qoropa/Util.hs | gpl-2.0 | 5,548 | 0 | 16 | 1,866 | 1,243 | 653 | 590 | 105 | 2 |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<!-- JavaHelp helpset (Indonesian, id-ID) for the ZAP port-scan add-on:
     declares the table of contents, index, full-text search and
     favorites views backed by map.jhm / toc.xml / index.xml. -->
<helpset version="2.0" xml:lang="id-ID">
<title>Port Pemindai | Eksistensi ZAP</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Isi</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Indeks</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Telusuri</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorit</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset> | veggiespam/zap-extensions | addOns/portscan/src/main/javahelp/org/zaproxy/zap/extension/portscan/resources/help_id_ID/helpset_id_ID.hs | apache-2.0 | 972 | 80 | 66 | 160 | 415 | 210 | 205 | -1 | -1 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE NamedFieldPuns #-}
module GDALDriverSpec (main, spec) where
import GDAL
import GDAL.Internal.GDAL ( createDatasetH )
import GDAL.Internal.HSDriver
-- No parallel specs here because things we do here are would affect other
-- tests
import TestUtils hiding (describe, hspec)
import Test.Hspec (hspec)
import qualified Data.ByteString.Char8 as BS
-- | Run the driver specs with hspec.
main :: IO ()
main = hspec spec
-- | Custom-driver specs.  Both cases are disabled ('xit'): they
-- register/unregister a process-global GDAL driver, which would leak
-- into other test modules.
spec :: Spec
spec = do
  xit "can register a custom driver" $ do
    -- driver that claims any path starting with "foo" and opens a
    -- 10x11 single-band in-memory dataset for it
    d <- createDriver HSDriver
      { hsdName = "MyDriver"
      , hsdIdentify = return . BS.isPrefixOf "foo"
      , hsdOpen = \_ -> do
          d <- driverByName "MEM"
          HsdDatasetH <$> createDatasetH d "MyDesc" (10:+:11) 1 GDT_Byte []
      }
    registerDriver d
    d' <- driverByName "MyDriver"
    description d' >>= (`shouldBe` "MyDriver")
    -- unidentified path must fail; identified path must open
    openReadOnly "bar.tif" GDT_Byte `shouldThrow` ((==OpenFailed) . gdalErrNum)
    ds <- openReadOnly "foo.tif" GDT_Byte
    datasetSize ds `shouldBe` (10:+:11)
    description ds >>= (`shouldBe` "MyDesc")
    deleteDriver d
    driverByName "MyDriver" `shouldThrow` (==UnknownDriver "MyDriver")
  xit "survives an exception in the identify callback" $ do
    let info = HSDriver
          { hsdName = "MyDriver"
          , hsdIdentify = error "sheeeeet"
          , hsdOpen = const (return HsdError)
          }
    withDriver info $
      openReadOnly "foo.tif" GDT_Byte
        `shouldThrow` ((==OpenFailed) . gdalErrNum)
| meteogrid/bindings-gdal | tests/GDALDriverSpec.hs | bsd-3-clause | 1,599 | 0 | 20 | 358 | 404 | 219 | 185 | 41 | 1 |
{-# Language PatternGuards #-}
module Blub
( blub
, foo
, bar
) where
import Ugah.Foo
import Control.Applicative ( a
, b
)
import Ugah.Blub
f :: Int -> Int
f = (+ 3)
g :: Int -> Int
g =
where
| jystic/hsimport | tests/inputFiles/ModuleTest29.hs | bsd-3-clause | 262 | 0 | 5 | 112 | 71 | 44 | 27 | -1 | -1 |
{-# LANGUAGE OverloadedLists #-}
-- | STG error transitions, in order to provide useful information to the user.
module Stg.Machine.Evaluate.ErrorTransitions (
updatableClosureWithArgs,
returnWithEmptyReturnStack,
functionArgumentNotInScope,
constructorArgumentNotInScope,
primopArgumentNotInScope,
algReturnToPrimAlts,
primReturnToAlgAlts,
enterBlackhole,
updateClosureWithPrimitive,
nonAlgPrimScrutinee,
divisionByZero,
badConArity,
) where
import Data.Stack (Stack (..))
import Stg.Language
import Stg.Machine.Env
import Stg.Machine.Evaluate.Common
import qualified Stg.Machine.Heap as H
import Stg.Machine.Types
import Stg.Util
-- | Page 39, 2nd paragraph: "[...] closures with non-empty argument lists are
-- never updatable [...]"
updatableClosureWithArgs :: StgState -> Maybe StgState
updatableClosureWithArgs state = case stgCode state of
    Enter addr
        | Just (HClosure (Closure (LambdaForm _ Update (_:_) _) _))
            <- H.lookup addr (stgHeap state)
        -> Just (state { stgInfo = Info (StateError UpdatableClosureWithArgs) [] })
    _ -> Nothing
-- | Page 39, 4th paragraph: "It is not possible for the ReturnInt state to see an
-- empty return stack, because that would imply that a closure should be updated
-- with a primitive value; but no closure has a primitive type."
returnWithEmptyReturnStack :: StgState -> Maybe StgState
returnWithEmptyReturnStack state = case (stgCode state, stgStack state) of
    (ReturnInt{}, Empty) -> Just (state
        { stgInfo = Info (StateError ReturnIntWithEmptyReturnStack)
                         [Detail_ReturnIntCannotUpdate] })
    _ -> Nothing
-- | A function was applied to an argument that was neither globally defined
-- nor in the local environment
functionArgumentNotInScope :: StgState -> Maybe StgState
functionArgumentNotInScope state
    -- The function itself must also be in scope, hence 'AtomVar f' is checked
    -- along with the arguments.
    | Eval (AppF f xs) locals <- stgCode state
    , Failure notInScope <- vals locals (stgGlobals state) (AtomVar f : xs)
        = Just (state { stgInfo = Info (StateError (VariablesNotInScope notInScope)) [] })
    | otherwise = Nothing
-- | A constructor was applied to an argument that was neither globally defined
-- nor in the local environment
constructorArgumentNotInScope :: StgState -> Maybe StgState
constructorArgumentNotInScope state
    | Eval (AppC _con xs) locals <- stgCode state
    , Failure notInScope <- vals locals (stgGlobals state) xs
        = Just (state { stgInfo = Info (StateError (VariablesNotInScope notInScope)) [] })
    | otherwise = Nothing
-- | A primitive operation was applied to an argument that was neither globally
-- defined nor in the local environment
primopArgumentNotInScope :: StgState -> Maybe StgState
primopArgumentNotInScope state
    -- Primops take no global arguments, so only the local environment counts.
    -- The type annotation pins the OverloadedLists literal to a plain list.
    | Eval (AppP _op x y) locals <- stgCode state
    , Failure notInScope <- traverse (localVal locals) ([x, y] :: [Atom])
        = Just (state { stgInfo = Info (StateError (VariablesNotInScope notInScope)) [] })
    | otherwise = Nothing
-- | Algebraic constructor return, but primitive alternative on return frame
algReturnToPrimAlts :: StgState -> Maybe StgState
algReturnToPrimAlts state = case (stgCode state, stgStack state) of
    (ReturnCon{}, ReturnFrame (Alts PrimitiveAlts{} _) _ :< _)
        -> Just (state { stgInfo = Info (StateError AlgReturnToPrimAlts) [] })
    _ -> Nothing
-- | Primitive return, but algebraic alternative on return frame
primReturnToAlgAlts :: StgState -> Maybe StgState
primReturnToAlgAlts state = case (stgCode state, stgStack state) of
    (ReturnInt _, ReturnFrame (Alts AlgebraicAlts{} _) _ :< _)
        -> Just (state { stgInfo = Info (StateError PrimReturnToAlgAlts) [] })
    _ -> Nothing
-- | A black hole was entered, and the infinite recursion detection triggered
-- as a result
enterBlackhole :: StgState -> Maybe StgState
enterBlackhole state = case stgCode state of
    Enter addr
        | Just (Blackhole bhTick) <- H.lookup addr (stgHeap state)
        -> Just (state
            { stgInfo = Info (StateError EnterBlackhole)
                             [Detail_EnterBlackHole addr bhTick] })
    _ -> Nothing
-- | Closures are always lifted, not primitive
updateClosureWithPrimitive :: StgState -> Maybe StgState
updateClosureWithPrimitive state = case (stgCode state, stgStack state) of
    (ReturnInt _, UpdateFrame _ :< _) -> Just (state
        { stgInfo = Info (StateError UpdateClosureWithPrimitive)
                         [Detail_UpdateClosureWithPrimitive] })
    _ -> Nothing
-- | Non-algebraic scrutinee
--
-- For more information on this, see 'Stg.Prelude.seq'.
nonAlgPrimScrutinee :: StgState -> Maybe StgState
nonAlgPrimScrutinee state = case (stgCode state, stgStack state) of
    (Enter _, ReturnFrame{} :< _)
        -> Just (state { stgInfo = Info (StateError NonAlgPrimScrutinee) [] })
    _ -> Nothing
-- | A primitive division had zero as denominator
divisionByZero :: StgState -> Maybe StgState
divisionByZero state
    -- Both operands must already resolve to primitive ints; only then can
    -- the primop be applied and report a division by zero.
    | Eval (AppP op x y) locals <- stgCode state
    , Success (PrimInt xVal) <- localVal locals x
    , Success (PrimInt yVal) <- localVal locals y
    , Failure Div0 <- applyPrimOp op xVal yVal
        = Just (state { stgInfo = Info (StateError DivisionByZero) [] })
    | otherwise = Nothing
-- | Bad constructor arity: different number of arguments in code segment and in
-- return frame
badConArity :: StgState -> Maybe StgState
badConArity state
    | ReturnCon con ws <- stgCode state
    , ReturnFrame alts _ :< _ <- stgStack state
    , Success (AltMatches (AlgebraicAlt _con vars _)) <- lookupAlgebraicAlt alts con
    , length ws /= length vars
        = Just (state
            { stgInfo = Info (StateError (BadConArity (length ws) (length vars)))
                             [Detail_BadConArity] })
    | otherwise = Nothing
| quchen/stg | src/Stg/Machine/Evaluate/ErrorTransitions.hs | bsd-3-clause | 5,965 | 0 | 17 | 1,293 | 1,464 | 770 | 694 | 108 | 1 |
module ETA.Util
(indexList,
upperFirst,
scanM,
expectJust,
safeHead,
safeLast)
where
import qualified Data.Char as C
import Data.Text (Text, empty, uncons, cons)
import ETA.Utils.Maybes(expectJust)
-- | Pair every element of a list with its 1-based position.
indexList :: (Integral b) => [a] -> [(b, a)]
indexList = zipWith (,) [1 ..]
-- | Capitalise the first character of a 'Text'; empty input stays empty.
upperFirst :: Text -> Text
upperFirst str = maybe empty capitalise (uncons str)
  where
    capitalise (c, rest) = cons (C.toUpper c) rest
-- | Monadic left scan: like 'scanl', but the step function runs in a monad.
-- The result always begins with the initial accumulator.
scanM :: (Monad m) => (a -> b -> m a) -> a -> [b] -> m [a]
scanM step = go
  where
    go acc []     = return [acc]
    go acc (x:xs) = step acc x >>= \acc' ->
                    go acc' xs >>= \rest ->
                    return (acc : rest)
-- | Total variant of 'head': first element, or 'Nothing' for the empty list.
safeHead :: [a] -> Maybe a
safeHead xs = case xs of
    (y:_) -> Just y
    []    -> Nothing
-- | Total variant of 'last': final element, or 'Nothing' for the empty list.
safeLast :: [a] -> Maybe a
safeLast []     = Nothing
safeLast [y]    = Just y
safeLast (_:ys) = safeLast ys
| pparkkin/eta | compiler/ETA/Util.hs | bsd-3-clause | 743 | 0 | 11 | 182 | 372 | 200 | 172 | 27 | 2 |
module Operator00001 where
renderNode (s, a) = text (label a) # bold # font "sans-serif"
| charleso/intellij-haskforce | tests/gold/parser/Operator00001.hs | apache-2.0 | 90 | 0 | 9 | 16 | 38 | 20 | 18 | 2 | 1 |
{-# LANGUAGE CPP #-}
{- |
Module : XMonad.Util.XSelection
Copyright : (C) 2007 Andrea Rossato, Matthew Sackman
License : BSD3
Maintainer : Gwern Branwen <gwern0@gmail.com>
Stability : unstable
Portability : unportable
A module for accessing and manipulating X Window's mouse selection (the buffer used in copy and pasting).
'getSelection' is an adaptation of Hxsel.hs and Hxput.hs from the XMonad-utils, available:
> $ darcs get <http://gorgias.mine.nu/repos/xmonad-utils>
-}
module XMonad.Util.XSelection ( -- * Usage
-- $usage
getSelection,
promptSelection,
safePromptSelection,
transformPromptSelection,
transformSafePromptSelection) where
import Control.Exception.Extensible as E (catch,SomeException(..))
import Control.Monad (liftM, join)
import Data.Maybe (fromMaybe)
import XMonad
import XMonad.Util.Run (safeSpawn, unsafeSpawn)
import Codec.Binary.UTF8.String (decode)
{- $usage
Add @import XMonad.Util.XSelection@ to the top of Config.hs
Then make use of getSelection or promptSelection as needed; if
one wanted to run Firefox with the selection as an argument (perhaps
the selection string is an URL you just highlighted), then one could add
to the xmonad.hs a line like thus:
> , ((modm .|. shiftMask, xK_b), promptSelection "firefox")
Future improvements for XSelection:
* More elaborate functionality: Emacs' registers are nice; if you
don't know what they are, see <http://www.gnu.org/software/emacs/manual/html_node/emacs/Registers.html#Registers> -}
-- | Returns a String corresponding to the current mouse selection in X;
-- if there is none, an empty string is returned.
getSelection :: MonadIO m => m String
getSelection = io $ do
  dpy <- openDisplay ""
  let dflt = defaultScreen dpy
  rootw <- rootWindow dpy dflt
  -- Invisible 1x1 helper window that will receive the converted selection.
  win <- createSimpleWindow dpy rootw 0 0 1 1 0 0 0
  p <- internAtom dpy "PRIMARY" True
  -- Target encoding, most specific first: UTF8_STRING, then COMPOUND_TEXT,
  -- then a plain string atom as last resort.
  -- NOTE(review): the last fallback is spelled "sTring" — presumably meant
  -- "STRING"; confirm against upstream before changing.
  ty <- E.catch
           (E.catch
                 (internAtom dpy "UTF8_STRING" False)
                 (\(E.SomeException _) -> internAtom dpy "COMPOUND_TEXT" False))
           (\(E.SomeException _) -> internAtom dpy "sTring" False)
  clp <- internAtom dpy "BLITZ_SEL_STRING" False
  -- Ask the selection owner to convert PRIMARY into our 'clp' property.
  xConvertSelection dpy p ty clp win currentTime
  allocaXEvent $ \e -> do
    nextEvent dpy e
    ev <- getEvent e
    if ev_event_type ev == selectionNotify
       -- Owner answered: read the property bytes and decode them as UTF-8.
       then do res <- getWindowProperty8 dpy clp win
               return $ decode . map fromIntegral . fromMaybe [] $ res
       -- Unexpected event: clean up the helper window and give up.
       else destroyWindow dpy win >> return ""
{- | A wrapper around 'getSelection'. Makes it convenient to run a program with the current selection as an argument.
This is convenient for handling URLs, in particular. For example, in your Config.hs you could bind a key to
@promptSelection \"firefox\"@;
this would allow you to highlight a URL string and then immediately open it up in Firefox.
'promptSelection' passes strings through the system shell, \/bin\/sh; if you do not wish your selected text
to be interpreted or mangled by the shell, use 'safePromptSelection'. safePromptSelection will bypass the
shell using 'safeSpawn' from "XMonad.Util.Run"; see its documentation for more
details on the advantages and disadvantages of using safeSpawn. -}
promptSelection, safePromptSelection, unsafePromptSelection :: String -> X ()
promptSelection = unsafePromptSelection
-- Fetch the selection first, then hand it to the spawner: 'safeSpawn' takes
-- the selection as a single argv entry, 'unsafeSpawn' splices it into a shell
-- command line.
safePromptSelection app = io getSelection >>= \sel -> safeSpawn app [sel]
unsafePromptSelection app = io getSelection >>= \sel -> unsafeSpawn (app ++ " " ++ sel)
{- | A wrapper around 'promptSelection' and its safe variant. They take two parameters, the
first is a function that transforms strings, and the second is the application to run.
The transformer essentially transforms the selection in X.
One example is to wrap code, such as a command line action copied out of the browser
to be run as @"sudo" ++ cmd@ or @"su - -c \""++ cmd ++"\""@. -}
transformPromptSelection, transformSafePromptSelection :: (String -> String) -> String -> X ()
-- NOTE(review): the safety here looks swapped relative to the prompt family
-- above: 'transformPromptSelection' uses 'safeSpawn' (no shell) while
-- 'transformSafePromptSelection' uses 'unsafeSpawn' (goes through /bin/sh),
-- even though 'promptSelection' is the unsafe variant and
-- 'safePromptSelection' the safe one. Confirm intent upstream before relying
-- on either for untrusted selection text.
transformPromptSelection f app = join $ io $ liftM (safeSpawn app . return) (fmap f getSelection)
transformSafePromptSelection f app = join $ io $ liftM unsafeSpawn $ fmap (\x -> app ++ " " ++ x) (fmap f getSelection)
| MasseR/xmonadcontrib | XMonad/Util/XSelection.hs | bsd-3-clause | 4,474 | 0 | 18 | 1,036 | 634 | 327 | 307 | 41 | 2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.