| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–101) | path (stringlengths 4–991) | language (stringclasses, 3 values) | license (stringclasses, 5 values) | size (int64, 2–1.05M) |
|---|---|---|---|---|---|
module Rebase.GHC.IO.Encoding.Types
(
module GHC.IO.Encoding.Types
)
where
import GHC.IO.Encoding.Types
|
nikita-volkov/rebase
|
library/Rebase/GHC/IO/Encoding/Types.hs
|
Haskell
|
mit
| 107
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TupleSections #-}
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html
module Stratosphere.ResourceProperties.IoT1ClickProjectPlacementTemplate where
import Stratosphere.ResourceImports
-- | Full data type definition for IoT1ClickProjectPlacementTemplate. See
-- 'ioT1ClickProjectPlacementTemplate' for a more convenient constructor.
data IoT1ClickProjectPlacementTemplate =
IoT1ClickProjectPlacementTemplate
{ _ioT1ClickProjectPlacementTemplateDefaultAttributes :: Maybe Object
, _ioT1ClickProjectPlacementTemplateDeviceTemplates :: Maybe Object
} deriving (Show, Eq)
instance ToJSON IoT1ClickProjectPlacementTemplate where
toJSON IoT1ClickProjectPlacementTemplate{..} =
object $
catMaybes
[ fmap (("DefaultAttributes",) . toJSON) _ioT1ClickProjectPlacementTemplateDefaultAttributes
, fmap (("DeviceTemplates",) . toJSON) _ioT1ClickProjectPlacementTemplateDeviceTemplates
]
-- | Constructor for 'IoT1ClickProjectPlacementTemplate' containing required
-- fields as arguments.
ioT1ClickProjectPlacementTemplate
:: IoT1ClickProjectPlacementTemplate
ioT1ClickProjectPlacementTemplate =
IoT1ClickProjectPlacementTemplate
{ _ioT1ClickProjectPlacementTemplateDefaultAttributes = Nothing
, _ioT1ClickProjectPlacementTemplateDeviceTemplates = Nothing
}
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html#cfn-iot1click-project-placementtemplate-defaultattributes
itcpptDefaultAttributes :: Lens' IoT1ClickProjectPlacementTemplate (Maybe Object)
itcpptDefaultAttributes = lens _ioT1ClickProjectPlacementTemplateDefaultAttributes (\s a -> s { _ioT1ClickProjectPlacementTemplateDefaultAttributes = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html#cfn-iot1click-project-placementtemplate-devicetemplates
itcpptDeviceTemplates :: Lens' IoT1ClickProjectPlacementTemplate (Maybe Object)
itcpptDeviceTemplates = lens _ioT1ClickProjectPlacementTemplateDeviceTemplates (\s a -> s { _ioT1ClickProjectPlacementTemplateDeviceTemplates = a })
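-- A minimal usage sketch (illustrative only; the binding name below is not part
-- of the generated API): start from the empty constructor and override fields
-- either with a record update, as here, or through the lenses defined above.
examplePlacementTemplate :: IoT1ClickProjectPlacementTemplate
examplePlacementTemplate =
  ioT1ClickProjectPlacementTemplate
    { _ioT1ClickProjectPlacementTemplateDefaultAttributes = Just mempty
    }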
|
frontrowed/stratosphere
|
library-gen/Stratosphere/ResourceProperties/IoT1ClickProjectPlacementTemplate.hs
|
Haskell
|
mit
| 2,300
|
--
import Data.Char
data Operator = Plus | Minus | Times | Div deriving (Show,Eq)
opToChar :: Operator -> Char
-- opToChar = undefined
opToChar Plus = '+'
opToChar Minus = '-'
opToChar Times = '*'
opToChar Div = '/'
opToStr :: Operator -> String
opToStr Plus = "+"
opToStr Minus = "-"
opToStr Times = "*"
opToStr Div = "/"
data Token = TokOp Operator
| TokIdent String
| TokNum Int
| TokSpace
deriving (Show,Eq)
showContent :: Token -> String
showContent (TokOp op) = opToStr op
showContent (TokIdent str) = str
showContent (TokNum i) = show i
token :: Token
token = TokIdent "x"
operator :: Char -> Operator
operator c | c == '+' = Plus
| c == '-' = Minus
| c == '*' = Times
| c == '/' = Div
tokenizeChar :: Char -> Token
tokenizeChar c
| elem c "+/-*" = TokOp (operator c)
| isDigit c = TokNum (digitToInt c)
| isAlpha c = TokIdent [c]
| isSpace c = TokSpace
| otherwise = error $ "Cannot tokenizeChar " ++ [c]
tokenize :: String -> [Token]
tokenize = map tokenizeChar
-- isDigit :: Char -> Bool
-- isDigit c = elem c ['0'..'9']
-- isAlpha :: Char -> Bool
-- isAlpha c = elem c $ ['a'..'z'] ++ ['A'..'Z']
-- isSpace :: Char -> Bool
-- isSpace c = elem c $ " "
-- digitToInt :: Char -> Int
-- digitToInt c | isDigit c = fromEnum c - 48
digitToInts :: String -> [Int]
digitToInts = map digitToInt
deSpace :: [Token] -> [Token]
deSpace = filter (\t -> t /= TokSpace)
alnums :: String -> (String, String)
alnums str = als "" str
where
als acc [] = (acc, [])
als acc (c:cs) | isAlphaNum c = als (c:acc) cs
| otherwise = (reverse(acc), c:cs)
-- Scales with O(N^2), N = len(str)
type Accum = (Bool, String, String)
alnums' :: String -> (String,String)
alnums' str = let (_, als, rest) = foldl f (True, [], []) str
in (als, rest)
where
f(True, als, rest) c | isAlphaNum c = (True, als ++ [c], rest)
| otherwise = (False, als, [c])
f(False, als, rest) c = (False, als, rest ++ [c])
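-- Note on the O(N^2) cost above: 'als ++ [c]' rebuilds the accumulated prefix
-- once per character. A linear sketch (illustrative only; not used elsewhere in
-- this file) gets the same split directly from the standard 'span':
alnumsSpan :: String -> (String, String)
alnumsSpan = span isAlphaNum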
digits :: String -> (String, String)
digits str = digs [] str
where
digs acc [] = (acc, [])
digs acc (c:cs) | isDigit c = digs (c:acc) cs
| otherwise = (reverse(acc), c:cs)
rev :: String -> String
rev = foldl (\acc a -> a:acc) []
cpy :: String -> String
cpy = foldr (\a acc -> a:acc) []
span' :: (a->Bool) -> [a] -> ([a],[a])
span' pred str = spanAcc [] str
where
spanAcc acc [] = (acc, [])
spanAcc acc (c:cs) | pred c = spanAcc (c:acc) cs
| otherwise = (reverse(acc), c:cs)
span'' :: (a->Bool) -> [a] -> ([a],[a])
span'' pred str =
let -- define helper
spanAcc acc [] = (acc, [])
spanAcc acc (c:cs) | pred c = spanAcc (c:acc) cs
| otherwise = (reverse(acc), c:cs)
in
spanAcc [] str
main = do
putStrLn $ showContent token
print token
print $ operator '*'
print $ tokenize "**/+"
print $ deSpace $ tokenize "1 + 4 / x"
print $ digitToInts "1234"
print $ alnums "R2D2+C3Po"
print $ alnums "a14"
print $ alnums' "R2D2+C3Po"
print $ alnums' "a14"
print $ rev "1234"
print $ cpy "1234"
print $ digits "1234abc 5678"
print $ span' (\c -> isAlphaNum c) "R2D2+C3Po"
print $ span' isAlphaNum "R2D2+C3Po"
print $ span'' isDigit "1234abc 5678"
|
egaburov/funstuff
|
Haskell/BartoszBofH/7_TokenizerHOF/tokenize.hs
|
Haskell
|
apache-2.0
| 3,402
|
{-# LANGUAGE CPP #-}
-----------------------------------------------------------------------------
-- |
-- Module : Haddock.Utils
-- Copyright : (c) The University of Glasgow 2001-2002,
-- Simon Marlow 2003-2006,
-- David Waern 2006-2009
-- License : BSD-like
--
-- Maintainer : haddock@projects.haskell.org
-- Stability : experimental
-- Portability : portable
-----------------------------------------------------------------------------
module Haddock.Utils (
-- * Misc utilities
restrictTo, emptyHsQTvs,
toDescription, toInstalledDescription,
-- * Filename utilities
moduleHtmlFile, moduleHtmlFile',
contentsHtmlFile, indexHtmlFile,
frameIndexHtmlFile,
moduleIndexFrameName, mainFrameName, synopsisFrameName,
subIndexHtmlFile,
jsFile, framesFile,
-- * Anchor and URL utilities
moduleNameUrl, moduleNameUrl', moduleUrl,
nameAnchorId,
makeAnchorId,
-- * Miscellaneous utilities
getProgramName, bye, die, dieMsg, noDieMsg, mapSnd, mapMaybeM, escapeStr,
-- * HTML cross reference mapping
html_xrefs_ref, html_xrefs_ref',
-- * Doc markup
markup,
idMarkup,
mkMeta,
-- * List utilities
replace,
spanWith,
-- * MTL stuff
MonadIO(..),
-- * Logging
parseVerbosity,
out,
-- * System tools
getProcessID
) where
import Documentation.Haddock.Doc (emptyMetaDoc)
import Haddock.Types
import Haddock.GhcUtils
import GHC
import Name
import Control.Monad ( liftM )
import Data.Char ( isAlpha, isAlphaNum, isAscii, ord, chr )
import Numeric ( showIntAtBase )
import Data.Map ( Map )
import qualified Data.Map as Map hiding ( Map )
import Data.IORef ( IORef, newIORef, readIORef )
import Data.List ( isSuffixOf )
import Data.Maybe ( mapMaybe )
import System.Environment ( getProgName )
import System.Exit
import System.IO ( hPutStr, stderr )
import System.IO.Unsafe ( unsafePerformIO )
import qualified System.FilePath.Posix as HtmlPath
import Distribution.Verbosity
import Distribution.ReadE
#ifndef mingw32_HOST_OS
import qualified System.Posix.Internals
#endif
import MonadUtils ( MonadIO(..) )
--------------------------------------------------------------------------------
-- * Logging
--------------------------------------------------------------------------------
parseVerbosity :: String -> Either String Verbosity
parseVerbosity = runReadE flagToVerbosity
-- | Print a message to stdout, if it is not too verbose
out :: MonadIO m
=> Verbosity -- ^ program verbosity
-> Verbosity -- ^ message verbosity
-> String -> m ()
out progVerbosity msgVerbosity msg
| msgVerbosity <= progVerbosity = liftIO $ putStrLn msg
| otherwise = return ()
--------------------------------------------------------------------------------
-- * Some Utilities
--------------------------------------------------------------------------------
-- | Extract a module's short description.
toDescription :: Interface -> Maybe (MDoc Name)
toDescription = fmap mkMeta . hmi_description . ifaceInfo
-- | Extract a module's short description.
toInstalledDescription :: InstalledInterface -> Maybe (MDoc Name)
toInstalledDescription = fmap mkMeta . hmi_description . instInfo
mkMeta :: Doc a -> MDoc a
mkMeta x = emptyMetaDoc { _doc = x }
--------------------------------------------------------------------------------
-- * Making abstract declarations
--------------------------------------------------------------------------------
restrictTo :: [Name] -> LHsDecl Name -> LHsDecl Name
restrictTo names (L loc decl) = L loc $ case decl of
TyClD d | isDataDecl d ->
TyClD (d { tcdDataDefn = restrictDataDefn names (tcdDataDefn d) })
TyClD d | isClassDecl d ->
TyClD (d { tcdSigs = restrictDecls names (tcdSigs d),
tcdATs = restrictATs names (tcdATs d) })
_ -> decl
restrictDataDefn :: [Name] -> HsDataDefn Name -> HsDataDefn Name
restrictDataDefn names defn@(HsDataDefn { dd_ND = new_or_data, dd_cons = cons })
| DataType <- new_or_data
= defn { dd_cons = restrictCons names cons }
| otherwise -- Newtype
= case restrictCons names cons of
[] -> defn { dd_ND = DataType, dd_cons = [] }
[con] -> defn { dd_cons = [con] }
_ -> error "Should not happen"
restrictCons :: [Name] -> [LConDecl Name] -> [LConDecl Name]
restrictCons names decls = [ L p d | L p (Just d) <- map (fmap keep) decls ]
where
keep d | any (\n -> n `elem` names) (map unLoc $ con_names d) =
case con_details d of
PrefixCon _ -> Just d
RecCon fields
| all field_avail fields -> Just d
| otherwise -> Just (d { con_details = PrefixCon (field_types (map unL fields)) })
-- if we have *all* the field names available, then
-- keep the record declaration. Otherwise degrade to
-- a constructor declaration. This isn't quite right, but
-- it's the best we can do.
InfixCon _ _ -> Just d
where
field_avail (L _ (ConDeclField ns _ _)) = all (\n -> unLoc n `elem` names) ns
field_types flds = [ t | ConDeclField _ t _ <- flds ]
keep _ = Nothing
restrictDecls :: [Name] -> [LSig Name] -> [LSig Name]
restrictDecls names = mapMaybe (filterLSigNames (`elem` names))
restrictATs :: [Name] -> [LFamilyDecl Name] -> [LFamilyDecl Name]
restrictATs names ats = [ at | at <- ats , unL (fdLName (unL at)) `elem` names ]
emptyHsQTvs :: LHsTyVarBndrs Name
-- This function is here, rather than in HsTypes, because it is *renamed*, but
-- does not necessarily have all the right kind variables. It is used
-- in Haddock just for printing, so it doesn't matter
emptyHsQTvs = HsQTvs { hsq_kvs = error "haddock:emptyHsQTvs", hsq_tvs = [] }
--------------------------------------------------------------------------------
-- * Filename mangling functions stolen from GHC's main/DriverUtil.lhs.
--------------------------------------------------------------------------------
baseName :: ModuleName -> FilePath
baseName = map (\c -> if c == '.' then '-' else c) . moduleNameString
moduleHtmlFile :: Module -> FilePath
moduleHtmlFile mdl =
case Map.lookup mdl html_xrefs of
Nothing -> baseName mdl' ++ ".html"
Just fp0 -> HtmlPath.joinPath [fp0, baseName mdl' ++ ".html"]
where
mdl' = moduleName mdl
moduleHtmlFile' :: ModuleName -> FilePath
moduleHtmlFile' mdl =
case Map.lookup mdl html_xrefs' of
Nothing -> baseName mdl ++ ".html"
Just fp0 -> HtmlPath.joinPath [fp0, baseName mdl ++ ".html"]
contentsHtmlFile, indexHtmlFile :: String
contentsHtmlFile = "index.html"
indexHtmlFile = "doc-index.html"
-- | The name of the module index file to be displayed inside a frame.
-- Modules are displayed in full, but without indentation. Clicking opens in
-- the main window.
frameIndexHtmlFile :: String
frameIndexHtmlFile = "index-frames.html"
moduleIndexFrameName, mainFrameName, synopsisFrameName :: String
moduleIndexFrameName = "modules"
mainFrameName = "main"
synopsisFrameName = "synopsis"
subIndexHtmlFile :: String -> String
subIndexHtmlFile ls = "doc-index-" ++ b ++ ".html"
where b | all isAlpha ls = ls
| otherwise = concatMap (show . ord) ls
-------------------------------------------------------------------------------
-- * Anchor and URL utilities
--
-- NB: Anchor IDs, used as the destination of a link within a document must
-- conform to XML's NAME production. That, taken with XHTML and HTML 4.01's
-- various needs and compatibility constraints, means these IDs have to match:
-- [A-Za-z][A-Za-z0-9:_.-]*
-- Such IDs do not need to be escaped in any way when used as the fragment part
-- of a URL. Indeed, %-escaping them can lead to compatibility issues as it
-- isn't clear if such fragment identifiers should, or should not be unescaped
-- before being matched with IDs in the target document.
-------------------------------------------------------------------------------
moduleUrl :: Module -> String
moduleUrl = moduleHtmlFile
moduleNameUrl :: Module -> OccName -> String
moduleNameUrl mdl n = moduleUrl mdl ++ '#' : nameAnchorId n
moduleNameUrl' :: ModuleName -> OccName -> String
moduleNameUrl' mdl n = moduleHtmlFile' mdl ++ '#' : nameAnchorId n
nameAnchorId :: OccName -> String
nameAnchorId name = makeAnchorId (prefix : ':' : occNameString name)
where prefix | isValOcc name = 'v'
| otherwise = 't'
-- | Takes an arbitrary string and makes it a valid anchor ID. The mapping is
-- identity preserving.
makeAnchorId :: String -> String
makeAnchorId [] = []
makeAnchorId (f:r) = escape isAlpha f ++ concatMap (escape isLegal) r
where
escape p c | p c = [c]
| otherwise = '-' : show (ord c) ++ "-"
isLegal ':' = True
isLegal '_' = True
isLegal '.' = True
isLegal c = isAscii c && isAlphaNum c
-- NB: '-' is legal in IDs, but we use it as the escape char
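-- For example (an illustration of the escaping rules above): the anchor for the
-- value binding (++) is built as makeAnchorId "v:++"; '+' (ord 43) is not a
-- legal ID character, so it is escaped, giving "v:-43--43-".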
-------------------------------------------------------------------------------
-- * Files we need to copy from our $libdir
-------------------------------------------------------------------------------
jsFile, framesFile :: String
jsFile = "haddock-util.js"
framesFile = "frames.html"
-------------------------------------------------------------------------------
-- * Misc.
-------------------------------------------------------------------------------
getProgramName :: IO String
getProgramName = liftM (`withoutSuffix` ".bin") getProgName
where str `withoutSuffix` suff
| suff `isSuffixOf` str = take (length str - length suff) str
| otherwise = str
bye :: String -> IO a
bye s = putStr s >> exitSuccess
dieMsg :: String -> IO ()
dieMsg s = getProgramName >>= \prog -> die (prog ++ ": " ++ s)
noDieMsg :: String -> IO ()
noDieMsg s = getProgramName >>= \prog -> hPutStr stderr (prog ++ ": " ++ s)
mapSnd :: (b -> c) -> [(a,b)] -> [(a,c)]
mapSnd _ [] = []
mapSnd f ((x,y):xs) = (x,f y) : mapSnd f xs
mapMaybeM :: Monad m => (a -> m b) -> Maybe a -> m (Maybe b)
mapMaybeM _ Nothing = return Nothing
mapMaybeM f (Just a) = liftM Just (f a)
escapeStr :: String -> String
escapeStr = escapeURIString isUnreserved
-- The following few functions are copied from the Network.URI module
-- to avoid depending on the network lib, since doing so gives a
-- circular build dependency between haddock and network
-- (at least if you want to build network with haddock docs)
escapeURIChar :: (Char -> Bool) -> Char -> String
escapeURIChar p c
| p c = [c]
| otherwise = '%' : myShowHex (ord c) ""
where
myShowHex :: Int -> ShowS
myShowHex n r = case showIntAtBase 16 toChrHex n r of
[] -> "00"
[a] -> ['0',a]
cs -> cs
toChrHex d
| d < 10 = chr (ord '0' + fromIntegral d)
| otherwise = chr (ord 'A' + fromIntegral (d - 10))
escapeURIString :: (Char -> Bool) -> String -> String
escapeURIString = concatMap . escapeURIChar
isUnreserved :: Char -> Bool
isUnreserved c = isAlphaNumChar c || (c `elem` "-_.~")
isAlphaChar, isDigitChar, isAlphaNumChar :: Char -> Bool
isAlphaChar c = (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
isDigitChar c = c >= '0' && c <= '9'
isAlphaNumChar c = isAlphaChar c || isDigitChar c
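-- For example (an illustration of the helpers above): escapeStr "a b&c" keeps
-- the unreserved characters and percent-encodes the rest, giving "a%20b%26c".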
-----------------------------------------------------------------------------
-- * HTML cross references
--
-- For each module, we need to know where its HTML documentation lives
-- so that we can point hyperlinks to it. It is extremely
-- inconvenient to plumb this information to all the places that need
-- it (basically every function in HaddockHtml), and furthermore the
-- mapping is constant for any single run of Haddock. So for the time
-- being I'm going to use a write-once global variable.
-----------------------------------------------------------------------------
{-# NOINLINE html_xrefs_ref #-}
html_xrefs_ref :: IORef (Map Module FilePath)
html_xrefs_ref = unsafePerformIO (newIORef (error "module_map"))
{-# NOINLINE html_xrefs_ref' #-}
html_xrefs_ref' :: IORef (Map ModuleName FilePath)
html_xrefs_ref' = unsafePerformIO (newIORef (error "module_map"))
{-# NOINLINE html_xrefs #-}
html_xrefs :: Map Module FilePath
html_xrefs = unsafePerformIO (readIORef html_xrefs_ref)
{-# NOINLINE html_xrefs' #-}
html_xrefs' :: Map ModuleName FilePath
html_xrefs' = unsafePerformIO (readIORef html_xrefs_ref')
-----------------------------------------------------------------------------
-- * List utils
-----------------------------------------------------------------------------
replace :: Eq a => a -> a -> [a] -> [a]
replace a b = map (\x -> if x == a then b else x)
spanWith :: (a -> Maybe b) -> [a] -> ([b],[a])
spanWith _ [] = ([],[])
spanWith p xs@(a:as)
| Just b <- p a = let (bs,cs) = spanWith p as in (b:bs,cs)
| otherwise = ([],xs)
-----------------------------------------------------------------------------
-- * Put here temporarily
-----------------------------------------------------------------------------
markup :: DocMarkup id a -> Doc id -> a
markup m DocEmpty = markupEmpty m
markup m (DocAppend d1 d2) = markupAppend m (markup m d1) (markup m d2)
markup m (DocString s) = markupString m s
markup m (DocParagraph d) = markupParagraph m (markup m d)
markup m (DocIdentifier x) = markupIdentifier m x
markup m (DocIdentifierUnchecked x) = markupIdentifierUnchecked m x
markup m (DocModule mod0) = markupModule m mod0
markup m (DocWarning d) = markupWarning m (markup m d)
markup m (DocEmphasis d) = markupEmphasis m (markup m d)
markup m (DocBold d) = markupBold m (markup m d)
markup m (DocMonospaced d) = markupMonospaced m (markup m d)
markup m (DocUnorderedList ds) = markupUnorderedList m (map (markup m) ds)
markup m (DocOrderedList ds) = markupOrderedList m (map (markup m) ds)
markup m (DocDefList ds) = markupDefList m (map (markupPair m) ds)
markup m (DocCodeBlock d) = markupCodeBlock m (markup m d)
markup m (DocHyperlink l) = markupHyperlink m l
markup m (DocAName ref) = markupAName m ref
markup m (DocPic img) = markupPic m img
markup m (DocProperty p) = markupProperty m p
markup m (DocExamples e) = markupExample m e
markup m (DocHeader (Header l t)) = markupHeader m (Header l (markup m t))
markupPair :: DocMarkup id a -> (Doc id, Doc id) -> (a, a)
markupPair m (a,b) = (markup m a, markup m b)
-- | The identity markup
idMarkup :: DocMarkup a (Doc a)
idMarkup = Markup {
markupEmpty = DocEmpty,
markupString = DocString,
markupParagraph = DocParagraph,
markupAppend = DocAppend,
markupIdentifier = DocIdentifier,
markupIdentifierUnchecked = DocIdentifierUnchecked,
markupModule = DocModule,
markupWarning = DocWarning,
markupEmphasis = DocEmphasis,
markupBold = DocBold,
markupMonospaced = DocMonospaced,
markupUnorderedList = DocUnorderedList,
markupOrderedList = DocOrderedList,
markupDefList = DocDefList,
markupCodeBlock = DocCodeBlock,
markupHyperlink = DocHyperlink,
markupAName = DocAName,
markupPic = DocPic,
markupProperty = DocProperty,
markupExample = DocExamples,
markupHeader = DocHeader
}
-----------------------------------------------------------------------------
-- * System tools
-----------------------------------------------------------------------------
#ifdef mingw32_HOST_OS
foreign import ccall unsafe "_getpid" getProcessID :: IO Int -- relies on Int == Int32 on Windows
#else
getProcessID :: IO Int
getProcessID = fmap fromIntegral System.Posix.Internals.c_getpid
#endif
|
jstolarek/haddock
|
haddock-api/src/Haddock/Utils.hs
|
Haskell
|
bsd-2-clause
| 15,975
|
{-# LANGUAGE ScopedTypeVariables #-}
module Import
( module Prelude
, module Foundation
, (<>)
, Text
, module Data.Monoid
, module Control.Applicative
, module Gitolite
, module Data.Maybe
, module Settings.StaticFiles
, getGitolite
, withRepo
, withRepoObj
, isBlob
, repoLayout
, treeLink
, isTree
, module Yesod.Auth
, module ContentTypes
, isRegularFile
, isDirectory
, renderPath
, module Encodings
) where
import Yesod.Auth hiding (Route)
import Yesod.Default.Config
import Prelude hiding (writeFile, readFile, catch)
import Foundation
import Text.Hamlet (hamletFile)
import Data.Monoid (Monoid (mappend, mempty, mconcat))
import Control.Applicative ((<$>), (<*>), pure)
import Data.Text (Text)
import Gitolite hiding (User)
import qualified Data.Git as Git
import qualified System.Git as Git
import qualified Data.ByteString.Char8 as BS
import Data.List
import qualified Settings
import Settings.StaticFiles
import Data.Maybe (fromMaybe, listToMaybe)
import Database.Persist.Store
import qualified Data.Text as T
import Encodings
import ContentTypes
import Control.Exception (try, SomeException(..))
import System.FilePath
import System.Directory
isBlob, isTree :: Git.GitObject -> Bool
isBlob (Git.GoBlob _ _) = True
isBlob _ = False
isTree (Git.GoTree _ _) = True
isTree _ = False
isRegularFile :: Git.GitTreeEntry -> Bool
isRegularFile (Git.GitTreeEntry (Git.RegularFile _) _ _) = True
isRegularFile _ = False
isDirectory :: Git.GitTreeEntry -> Bool
isDirectory (Git.GitTreeEntry Git.Directory _ _) = True
isDirectory _ = False
withRepoObj :: String
-> ObjPiece
-> (Gitolite -> Repository -> Git.GitObject -> Handler a)
-> Handler a
withRepoObj repon (ObjPiece commit path) act = do
withRepo repon $ \git repo -> do
let gitDir = repoDir git repo
(prefix, rest) = splitAt 2 commit
root <- liftIO $ do
isHash <- doesFileExist $ gitDir </> "objects" </> prefix </> rest
if isHash
then Git.sha1ToObj (Git.SHA1 commit) gitDir
else repoBranch git repo commit >>= flip Git.sha1ToObj gitDir . commitRef . branchHEAD
let curPath = intercalate "/" (commit:path)
obj <- liftIO $ traverseGoTree git repo path root
setSessionBS "curPath" (BS.pack curPath)
ans <- act git repo obj
deleteSession "curPath"
return ans
withRepo :: String -> (Gitolite -> Repository -> Handler a) -> Handler a
withRepo repon act = do
git <- getGitolite
let mrep = find ((== repon) . repoName) $ repositories git
case mrep of
Nothing -> notFound
Just repo -> do
mu <- maybeAuth
let uName = maybe Settings.guestName (userIdent . entityVal) mu
if repo `isReadableFor` uName
then act git repo
else permissionDenied $ T.pack $
"You don't have permission for repository " ++ repon
getGitolite :: Handler Gitolite
getGitolite = liftIO $ parseGitolite repositoriesPath
renderPath :: ObjPiece -> String
renderPath (ObjPiece a b) = intercalate "/" (a:b)
infixr 5 <>
(<>) :: Monoid m => m -> m -> m
(<>) = mappend
treeLink :: String -> ObjPiece -> Widget
treeLink repon (ObjPiece c as) =
let ents = if null as then [[]] else init $ inits as
in [whamlet|
<ul .breadcrumb>
$forall e <- ents
<li>
<span .divider>/
<a href=@{TreeR repon (ObjPiece c e)}>
$if null e
#{c}
$else
#{last e}
$if (not (null as))
<li>
<span .divider>/ #
#{last as}
|]
repoLayout :: String -> ObjPiece -> Widget -> Handler RepHtml
repoLayout repon op@(ObjPiece commit ps) widget = withRepoObj repon op $ \git repo obj -> do
master <- getYesod
mmsg <- getMessage
route <- getCurrentRoute
let curTab = case route of
Just (TreeR _ _) -> "tab_files" :: String
Just (BlobR _ _) -> "tab_files"
Just (TagsR _) -> "tab_tags"
Just (CommitsR _ _) -> "tab_commits"
Just (CommitR _ _) -> "tab_commits"
_ -> "tab_files"
description <- liftIO $ getDescription git repo
branches <- liftIO $ repoBranches git repo
musr <- fmap entityVal <$> maybeAuth
let curPath = treeLink repon op
pc <- widgetToPageContent $ do
addScriptRemote "https://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js"
addScript $ StaticR js_bootstrap_dropdown_js
addStylesheet $ StaticR css_bootstrap_responsive_css
addStylesheet $ StaticR css_bootstrap_css
$(widgetFile "normalize")
$(widgetFile "repo-layout")
hamletToRepHtml $(hamletFile "templates/default-layout-wrapper.hamlet")
|
konn/gitolist
|
Import.hs
|
Haskell
|
bsd-2-clause
| 4,831
|
{-# LANGUAGE TupleSections #-}
module Driver where
import Control.Applicative ((<$>),(<*>))
import Control.Monad (when)
import Data.Foldable (forM_)
import Distribution.Package
import Distribution.PackageDescription
import Distribution.ModuleName
import Distribution.PackageDescription.Parse
import Distribution.Verbosity
import System.FilePath ((</>))
import System.Process (system)
--
import MetaPackage
cabalFile :: AProject -> FilePath
cabalFile (AProject pname ppath) = ppath </> (pname ++ ".cabal")
parseAProject :: AProject -> IO GenericPackageDescription
parseAProject proj =
let cabal = cabalFile proj
in readPackageDescription normal cabal
-- | Convert relative path info to absolute path info for modules
absolutePathModule :: AProject -> (FilePath,ModuleName) -> (FilePath,ModuleName)
absolutePathModule proj (fp,modname) =
let absolutify dir = projloc proj </> dir
in (absolutify fp,modname)
hyphenToUnderscore :: String -> String
hyphenToUnderscore = map (\x -> if x == '-' then '_' else x)
-- | Driver IO action for making a meta package
makeMetaPackage :: MetaProject -> String -> IO FilePath
makeMetaPackage mp extra = do
parsedpkgs <- mapM (\x -> (x,) <$> parseAProject x) (metaProjectPkg mp)
let allmodules = getAllModules parsedpkgs
(pkgpath,srcpath) <- initializeMetaPackage mp
forM_ parsedpkgs $ \x ->
let xname = (projname . fst) x
xpath = (projloc . fst) x
in linkDirectory xpath (pkgpath </> "data_" ++ (hyphenToUnderscore xname))
mapM_ (linkMod srcpath) . concatMap (\(proj,lst) -> map (absolutePathModule proj) lst) $ allmodules
let allmodnames = do (_,ns) <- allmodules
(_,m) <- ns
return m
allothermodnames = do (_,ns) <- getAllOtherModules parsedpkgs
(_,m) <- ns
return m
allothermodnamestrings = map components allothermodnames
pathsAction strs = when (take 6 (head strs) == "Paths_") $ do
let pname = drop 6 (head strs)
makePaths_xxxHsFile pkgpath mp pname
mapM_ pathsAction (allothermodnamestrings ++ map components allmodnames)
{-
let exelst = getExeFileAndCabalString bc mp pkgpath pkgs
exestr = concatMap snd exelst
mapM_ (linkExeSrcFile . fst) exelst
-}
makeCabalFile pkgpath mp parsedpkgs allmodnames allothermodnames extra -- "" -- exestr
return pkgpath
|
wavewave/metapackage
|
src/Driver.hs
|
Haskell
|
bsd-2-clause
| 2,470
|
{-# LANGUAGE TypeSynonymInstances, FlexibleInstances, CPP #-}
module Network.DNS.StateBinary where
import Control.Monad.State (State, StateT)
import qualified Control.Monad.State as ST
import Control.Monad.Trans.Resource (ResourceT)
import qualified Data.Attoparsec.ByteString as A
import qualified Data.Attoparsec.Types as T
import Data.ByteString (ByteString)
import qualified Data.ByteString as BS
import Data.ByteString.Builder (Builder)
import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Lazy.Char8 as LBS
import Data.Conduit (Sink)
import Data.Conduit.Attoparsec (sinkParser)
import Data.IntMap (IntMap)
import qualified Data.IntMap as IM
import Data.Map (Map)
import qualified Data.Map as M
import Data.Word (Word8, Word16, Word32)
import Network.DNS.Types
#if __GLASGOW_HASKELL__ < 709
import Control.Applicative ((<$>), (<*))
import Data.Monoid (Monoid, mconcat, mappend, mempty)
#endif
----------------------------------------------------------------
type SPut = State WState Builder
data WState = WState {
wsDomain :: Map Domain Int
, wsPosition :: Int
}
initialWState :: WState
initialWState = WState M.empty 0
instance Monoid SPut where
mempty = return mempty
mappend a b = mconcat <$> sequence [a, b]
put8 :: Word8 -> SPut
put8 = fixedSized 1 BB.word8
put16 :: Word16 -> SPut
put16 = fixedSized 2 BB.word16BE
put32 :: Word32 -> SPut
put32 = fixedSized 4 BB.word32BE
putInt8 :: Int -> SPut
putInt8 = fixedSized 1 (BB.int8 . fromIntegral)
putInt16 :: Int -> SPut
putInt16 = fixedSized 2 (BB.int16BE . fromIntegral)
putInt32 :: Int -> SPut
putInt32 = fixedSized 4 (BB.int32BE . fromIntegral)
putByteString :: ByteString -> SPut
putByteString = writeSized BS.length BB.byteString
addPositionW :: Int -> State WState ()
addPositionW n = do
(WState m cur) <- ST.get
ST.put $ WState m (cur+n)
fixedSized :: Int -> (a -> Builder) -> a -> SPut
fixedSized n f a = do addPositionW n
return (f a)
writeSized :: (a -> Int) -> (a -> Builder) -> a -> SPut
writeSized n f a = do addPositionW (n a)
return (f a)
wsPop :: Domain -> State WState (Maybe Int)
wsPop dom = do
doms <- ST.gets wsDomain
return $ M.lookup dom doms
wsPush :: Domain -> Int -> State WState ()
wsPush dom pos = do
(WState m cur) <- ST.get
ST.put $ WState (M.insert dom pos m) cur
----------------------------------------------------------------
type SGet = StateT PState (T.Parser ByteString)
data PState = PState {
psDomain :: IntMap Domain
, psPosition :: Int
}
----------------------------------------------------------------
getPosition :: SGet Int
getPosition = psPosition <$> ST.get
addPosition :: Int -> SGet ()
addPosition n = do
PState dom pos <- ST.get
ST.put $ PState dom (pos + n)
push :: Int -> Domain -> SGet ()
push n d = do
PState dom pos <- ST.get
ST.put $ PState (IM.insert n d dom) pos
pop :: Int -> SGet (Maybe Domain)
pop n = IM.lookup n . psDomain <$> ST.get
----------------------------------------------------------------
get8 :: SGet Word8
get8 = ST.lift A.anyWord8 <* addPosition 1
get16 :: SGet Word16
get16 = ST.lift getWord16be <* addPosition 2
where
word8' = fromIntegral <$> A.anyWord8
getWord16be = do
a <- word8'
b <- word8'
return $ a * 0x100 + b
get32 :: SGet Word32
get32 = ST.lift getWord32be <* addPosition 4
where
word8' = fromIntegral <$> A.anyWord8
getWord32be = do
a <- word8'
b <- word8'
c <- word8'
d <- word8'
return $ a * 0x1000000 + b * 0x10000 + c * 0x100 + d
getInt8 :: SGet Int
getInt8 = fromIntegral <$> get8
getInt16 :: SGet Int
getInt16 = fromIntegral <$> get16
getInt32 :: SGet Int
getInt32 = fromIntegral <$> get32
----------------------------------------------------------------
getNBytes :: Int -> SGet [Int]
getNBytes len = toInts <$> getNByteString len
where
toInts = map fromIntegral . BS.unpack
getNByteString :: Int -> SGet ByteString
getNByteString n = ST.lift (A.take n) <* addPosition n
----------------------------------------------------------------
initialState :: PState
initialState = PState IM.empty 0
sinkSGet :: SGet a -> Sink ByteString (ResourceT IO) (a, PState)
sinkSGet parser = sinkParser (ST.runStateT parser initialState)
runSGet :: SGet a -> ByteString -> Either String (a, PState)
runSGet parser bs = A.eitherResult $ A.parse (ST.runStateT parser initialState) bs
runSGetWithLeftovers :: SGet a -> ByteString -> Either String ((a, PState), ByteString)
runSGetWithLeftovers parser bs = toResult $ A.parse (ST.runStateT parser initialState) bs
where
toResult :: A.Result r -> Either String (r, ByteString)
toResult (A.Done i r) = Right (r, i)
toResult (A.Partial f) = toResult $ f BS.empty
toResult (A.Fail _ _ err) = Left err
runSPut :: SPut -> ByteString
runSPut = LBS.toStrict . BB.toLazyByteString . flip ST.evalState initialWState
|
greydot/dns
|
Network/DNS/StateBinary.hs
|
Haskell
|
bsd-3-clause
| 4,973
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{- |
Module: Main
Maintainer: Thomas Sutton
This module implements a command-line tool to perform formal concept analysis
on data sets.
-}
module Main where
import Control.Applicative
import qualified Data.ByteString.Lazy as BL
import Data.Csv hiding (Name, Parser)
import qualified Data.Text.Lazy.IO as T
import Options.Applicative
import System.IO
import Data.FCA
-- | Options for invocation, generally constructed from command-line options.
data Options = Options
{ optVerbose :: Bool -- ^ Be verbose.
, optHeader :: Bool -- ^ Data includes headers.
, optFormat :: Format -- ^ Input data format.
, optOutput :: Maybe String -- ^ Output to file.
, optInput :: Maybe String -- ^ Input from file.
}
deriving (Eq, Ord, Show)
-- | Formats for input data.
data Format
= EA -- ^ Data is in entity/attribute pairs.
| EAV -- ^ Data is in entity, attribute, value triples.
| Tabular -- ^ Data is in column-per-attribute.
deriving (Show, Ord, Eq)
-- | Parse 'Options' from command-line arguments.
optionsP :: Parser Options
optionsP = Options <$> pure False
<*> pure True
<*> formatP
<*> outputP
<*> inputP
where
verboseP = switch $
long "verbose"
<> short 'v'
<> help "Produce verbose output."
headerP = switch $
long "header"
<> short 'H'
<> help "Input contains headers."
outputP = option (Just <$> str) $
long "output"
<> short 'o'
<> help "Write output to FILE. (default: stdout)"
<> metavar "FILE"
<> value Nothing
inputP = argument (Just <$> str) $
metavar "FILE"
<> help "Read input from FILE. (default: stdin)"
<> value Nothing
formatP = option (eitherReader readFmt) $
long "format"
<> short 'f'
<> help "Input data format."
<> metavar "ea|eav|tab"
<> value EAV
<> showDefault
readFmt :: String -> Either String Format
readFmt "ea" = Right EA
readFmt "eav" = Right EAV
readFmt "tab" = Right Tabular
readFmt "tabular" = Right Tabular
readFmt _ = Left "Format must be: ea, eav, tabular"
-- | Open input and output handles based on command-line options.
getHandles :: Options -> IO (Handle, Handle)
getHandles Options{..} = do
inH <- maybe (return stdin) (`openFile` ReadMode) optInput
outH <- maybe (return stdout) (`openFile` WriteMode) optOutput
return (inH, outH)
-- | Get a function to read data in the specified format.
getReader :: Options -> Handle -> IO Frame
getReader opt =
case optFormat opt of
EAV -> readEAV opt
EA -> readEA opt
Tabular -> readTabular opt
where
-- | Read data in entity-attribute-value format.
readEAV :: Options -> Handle -> IO Frame
readEAV _ inH = do
input <- BL.hGetContents inH
case decode NoHeader input of
Left err -> error err
Right csv -> return $ parseEAV csv
-- | Read data in entity-attribute format.
readEA :: Options -> Handle -> IO Frame
readEA _ inH = do
input <- BL.hGetContents inH
case decode NoHeader input of
Left err -> error err
Right csv -> return $ parseEA csv
-- | Read data in tabular format.
readTabular :: Options -> Handle -> IO Frame
readTabular _ inH = do
input <- BL.hGetContents inH
case decode NoHeader input of
Left err -> error err
Right csv -> return $ parseTabular csv
main :: IO ()
main = do
opt <- execParser opts
(inH, outH) <- getHandles opt
-- Read the input data.
Frame ctx omap amap <- getReader opt inH
hClose inH
-- Run the FCA algorithm on the context.
let table = buildAETable ctx
let graph = generateGraph table omap amap
-- Output the graph.
T.hPutStrLn outH graph
hClose outH
where
opts = info (helper <*> optionsP)
( fullDesc
<> progDesc "Generate the concept lattice which describes a data set."
<> header "fca - formal concept analysis"
)
|
thsutton/fca
|
src/Main.hs
|
Haskell
|
bsd-3-clause
| 4,322
|
{-# OPTIONS_GHC -Wall #-}
module Classy.Casadi.Integrator where
import Control.Applicative ( (<$>) )
import Python.Exceptions
import Python.Interpreter
import Python.Objects
import Foreign.C.Types ( CDouble )
import qualified Classy.Convenience as CC
import Classy.State hiding ( run )
import Classy.Types
import Classy.Casadi.DAE
import Classy.Casadi.Bindings
newtype Integrator = Integrator PyObject
instance ToPyObject Integrator where toPyObject (Integrator p) = return p
instance FromPyObject Integrator where fromPyObject = return . Integrator
run :: IO ()
run = do
casadiModule <- casadiInit
dae <- mkDae casadiModule someSys
int <- mkIdasIntegrator casadiModule dae
setOption int "fsens_err_con" True
setOption int "quad_err_con" True
setOption int "abstol" (1e-12::CDouble)
setOption int "reltol" (1e-12::CDouble)
setOption int "fsens_abstol" (1e-6::CDouble)
setOption int "fsens_reltol" (1e-6::CDouble)
setOption int "asens_abstol" (1e-6::CDouble)
setOption int "asens_reltol" (1e-6::CDouble)
-- setOption int "exact_jacobian" exact_jacobian
-- setOption int "finite_difference_fsens" finite_difference_fsens
setOption int "max_num_steps" (100000 :: Integer)
setOption int "t0" (0::Integer)
setOption int "tf" (5::Integer)
-- showPyObject integrator >>= putStrLn
-- Set parameters
setParams casadiModule int [10,1,9.8]
--
-- Set initial state
setX0 casadiModule int [10, 0.5, 0, 0.1]
--
-- # Integrate
ret <- evaluate int
showPyObject ret >>= putStrLn
evaluate :: Integrator -> IO PyObject
evaluate (Integrator int) =
handlePy (\x -> ("evaluate: " ++) . show <$> formatException x >>= error) $ do
runMethodHs int "evaluate" noParms noKwParms
callMethodHs int "output" noParms noKwParms
setX0 :: CasadiModule -> Integrator -> [CDouble] -> IO ()
setX0 (CasadiModule cm) (Integrator int) x0s =
handlePy (\x -> ("setX0: " ++) . show <$> formatException x >>= error) $ do
d <- getattr cm "INTEGRATOR_X0"
x0s' <- toPyObject x0s
runMethodHs int "setInput" [x0s', d] noKwParms
setParams :: CasadiModule -> Integrator -> [CDouble] -> IO ()
setParams (CasadiModule cm) (Integrator int) ps =
handlePy (\x -> ("setParams: " ++) . show <$> formatException x >>= error) $ do
d <- getattr cm "INTEGRATOR_P"
ps' <- toPyObject ps
runMethodHs int "setInput" [ps', d] noKwParms
mkIdasIntegrator :: CasadiModule -> DAE -> IO Integrator
mkIdasIntegrator (CasadiModule casadiModule) dae =
handlePy (\x -> ("mkIdasIntegrator: " ++) . show <$> formatException x >>= error) $ do
int@(Integrator i) <- callMethodHs casadiModule "IdasIntegrator" [dae] noKwParms
runMethodHs i "init" noParms noKwParms
return int
setOption :: ToPyObject a => Integrator -> String -> a -> IO ()
setOption (Integrator i) option value =
handlePy (\x -> ("setOption: " ++) . show <$> formatException x >>= error) $ do
opt <- toPyObject option
val <- toPyObject value
runMethodHs i "setOption" [opt, val] noKwParms
someSys :: System
someSys = getSystem $ do
n <- newtonianBases
q <- addCoord "q"
r <- addCoord "r"
derivIsSpeed q
derivIsSpeed r
mass <- addParam "m"
g <- addParam "g"
tension <- addAction "T"
b <- rotY n q "B"
let r_b_n0 = CC.relativePoint N0 (CC.zVec r b)
basket <- addParticle mass r_b_n0
addForceAtCm basket (CC.zVec (mass*g) n)
addForceAtCm basket (CC.zVec (-tension) b)
|
ghorn/classy-dvda
|
src/Classy/Casadi/Integrator.hs
|
Haskell
|
bsd-3-clause
| 3,400
|
#!/usr/bin/env stack
-- stack --install-ghc runghc --package turtle
{-# LANGUAGE OverloadedStrings #-}
import Turtle
import Filesystem.Path.CurrentOS
import Data.Text
main = sh $
do m <- using $ mktempfile "." "m.pdf"
sr <- using $ mktempfile "." "sr.pdf"
let p1 = ["bbc-oca/msr/79ths.score"]
p2 = ["bbc-oca/msr/dorrator.score", "bbc-oca/msr/lexy.score"]
p1f <- encode <$> using (mktempfile "." "march.score")
p2f <- encode <$> using (mktempfile "." "s-r.score")
shells ("cat " <> (intercalate " " p1) <> " > " <> p1f) mempty
shells ("cat " <> "styles/landscape.score " <> (intercalate " " p2) <> " > " <> p2f) mempty
procs "score-writer" ["render", "--score-file", p1f, "--output-file", encode m] mempty
procs "score-writer" ["render", "--score-file", p2f, "--output-file", encode sr] mempty
shells ("/usr/local/bin/gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=msr.pdf " <> encode m <> " " <> encode sr) mempty
|
nkpart/score-writer
|
library/make-msr.hs
|
Haskell
|
bsd-3-clause
| 1,005
|
module Main (
main
) where
import Criterion.Config
import Criterion.Main
import System.Random
import qualified Signal.Wavelet.C1Bench as C1
import qualified Signal.Wavelet.Eval.CommonBench as EC
import qualified Signal.Wavelet.Eval1Bench as E1
import qualified Signal.Wavelet.Eval2Bench as E2
import qualified Signal.Wavelet.List.CommonBench as LC
import qualified Signal.Wavelet.List1Bench as L1
import qualified Signal.Wavelet.List2Bench as L2
import qualified Signal.Wavelet.Repa1Bench as R1
import qualified Signal.Wavelet.Repa2Bench as R2
import qualified Signal.Wavelet.Repa3Bench as R3
import qualified Signal.Wavelet.Vector1Bench as V1
import qualified Signal.Wavelet.Repa.LibraryBench as RL
main :: IO ()
main = return (mkStdGen 1232134332) >>=
defaultMainWith benchConfig (return ()) . benchmarks
benchmarks :: RandomGen g => g -> [Benchmark]
benchmarks gen =
let lsSize = 6
sigSize = 2 * 8192
lDataDwt = L1.dataDwt gen lsSize sigSize
cDataDwt = C1.dataDwt lDataDwt
rDataDwt = R1.dataDwt lDataDwt
vDataDwt = V1.dataDwt lDataDwt
cDataLattice = C1.dataLattice lDataDwt
vDataLattice = V1.dataLattice lDataDwt
rDataLattice = R1.dataLattice lDataDwt
rDataToPairs = R1.dataToPairs lDataDwt
rDataFromPairs = R1.dataFromPairs lDataDwt
rDataCslCsr = R1.dataCslCsr lDataDwt
rDataCslNCsrN = R1.dataCslNCsrN lDataDwt
rDataExtend = R2.dataExtend lDataDwt
r3DataLattice = R3.dataLattice lDataDwt
lDataLattice = LC.dataLattice lDataDwt
lDataExtend = LC.dataExtend lDataDwt
rDataCompute = RL.dataCompute lDataDwt
rDataCopy = RL.dataCopy lDataDwt
rDataExtract = RL.dataExtract lDataDwt
rDataAppend = RL.dataAppend lDataDwt
rDataBckperm = RL.dataBckperm lDataDwt
rDataMap = RL.dataMap lDataDwt
rDataTraverse = RL.dataTraverse lDataDwt
in [ -- See: Note [C/FFI criterion bug]
bgroup "DWT" . (:[]) $ bcompare
[
bench "C1 Seq" $ whnf C1.benchDwt cDataDwt
, bench "Vector1 Seq" $ whnf V1.benchDwt vDataDwt
, bench "Repa1 Seq" $ whnf R1.benchDwtS rDataDwt
, bench "Repa1 Par" $ whnf R1.benchDwtP rDataDwt
, bench "Repa2 Seq" $ whnf R2.benchDwtS rDataDwt
, bench "Repa2 Par" $ whnf R2.benchDwtP rDataDwt
, bench "Repa3 Seq" $ whnf R3.benchDwtS rDataDwt
, bench "Repa3 Par" $ whnf R3.benchDwtP rDataDwt
, bench "List1 Seq" $ nf L1.benchDwt lDataDwt
, bench "List2 Seq" $ nf L2.benchDwt lDataDwt
, bench "Eval1 Par" $ nf E1.benchDwt lDataDwt
, bench "Eval2 Par" $ nf E2.benchDwt lDataDwt
]
, bgroup "IDWT" . (:[]) $ bcompare
[
bench "C1 Seq" $ whnf C1.benchIdwt cDataDwt
, bench "Vector1 Seq" $ whnf V1.benchIdwt vDataDwt
, bench "Repa1 Seq" $ whnf R1.benchIdwtS rDataDwt
, bench "Repa1 Par" $ whnf R1.benchIdwtP rDataDwt
, bench "Repa2 Seq" $ whnf R2.benchIdwtS rDataDwt
, bench "Repa2 Par" $ whnf R2.benchIdwtP rDataDwt
, bench "Repa3 Seq" $ whnf R3.benchIdwtS rDataDwt
, bench "Repa3 Par" $ whnf R3.benchIdwtP rDataDwt
, bench "List1 Seq" $ nf L1.benchIdwt lDataDwt
, bench "List2 Seq" $ nf L2.benchIdwt lDataDwt
, bench "Eval1 Par" $ nf E1.benchIdwt lDataDwt
, bench "Eval2 Par" $ nf E2.benchIdwt lDataDwt
]
, bgroup "C1"
[
bench "Lattice Seq" $ whnf C1.benchLattice cDataLattice
]
, bgroup "Vector1"
[
bench "Lattice Seq" $ whnf V1.benchLattice vDataLattice
]
, bgroup "Repa1"
[
bench "Lattice Seq" $ whnf R1.benchLatticeS rDataLattice
, bench "Lattice Par" $ whnf R1.benchLatticeP rDataLattice
, bench "ToPairs Seq" $ whnf R1.benchToPairsS rDataToPairs
, bench "ToPairs Par" $ whnf R1.benchToPairsP rDataToPairs
, bench "FromPairs Seq" $ whnf R1.benchFromPairsS rDataFromPairs
, bench "FromPairs Par" $ whnf R1.benchFromPairsP rDataFromPairs
, bench "Csl Seq" $ whnf R1.benchCslS rDataCslCsr
, bench "Csl Par" $ whnf R1.benchCslP rDataCslCsr
, bench "CslP Seq" $ whnf R1.benchCslSP rDataCslCsr
, bench "CslP Par" $ whnf R1.benchCslPP rDataCslCsr
, bench "Csr Seq" $ whnf R1.benchCsrS rDataCslCsr
, bench "Csr Par" $ whnf R1.benchCsrP rDataCslCsr
, bench "CsrP Seq" $ whnf R1.benchCsrSP rDataCslCsr
, bench "CsrP Par" $ whnf R1.benchCsrPP rDataCslCsr
, bench "CslN Seq" $ whnf R1.benchCslNS rDataCslNCsrN
, bench "CslN Par" $ whnf R1.benchCslNP rDataCslNCsrN
, bench "CsrN Seq" $ whnf R1.benchCsrNS rDataCslNCsrN
, bench "CsrN Par" $ whnf R1.benchCsrNP rDataCslNCsrN
, bench "Lat+Frc+Csl Seq" $ whnf R1.benchLatticeForceCslS rDataLattice
, bench "Lat+Frc+Csl Par" $ whnf R1.benchLatticeForceCslP rDataLattice
, bench "Lattice+Csl Seq" $ whnf R1.benchLatticeCslS rDataLattice
, bench "Lattice+Csl Par" $ whnf R1.benchLatticeCslP rDataLattice
]
, bgroup "Repa2"
[
bench "Lattice Seq" $ whnf R2.benchLatticeS rDataLattice
, bench "Lattice Par" $ whnf R2.benchLatticeP rDataLattice
, bench "Trim+lattice Seq"$ whnf R2.benchTrimLatticeS rDataLattice
, bench "Trim+lattice Par"$ whnf R2.benchTrimLatticeP rDataLattice
, bench "ExtendFront Seq" $ whnf R2.benchExtendFrontS rDataExtend
, bench "ExtendFront Par" $ whnf R2.benchExtendFrontP rDataExtend
, bench "ExtendEnd Seq" $ whnf R2.benchExtendEndS rDataExtend
, bench "ExtendEnd Par" $ whnf R2.benchExtendEndP rDataExtend
]
, bgroup "Repa3"
[
bench "Lattice Seq" $ whnf R3.benchLatticeS r3DataLattice
, bench "Lattice Par" $ whnf R3.benchLatticeP r3DataLattice
]
, bgroup "List.Common"
[
bench "Lattice Seq" $ nf LC.benchLattice lDataLattice
, bench "ExtendFront Seq" $ nf LC.benchExtendFront lDataExtend
, bench "ExtendEnd Seq" $ nf LC.benchExtendEnd lDataExtend
]
, bgroup "Eval.Common"
[
bench "Lattice Par" $ nf EC.benchLattice lDataLattice
]
, bgroup "Repa built-in functions"
[
bench "computeS" $ whnf RL.benchComputeS rDataCompute
, bench "computeP" $ whnfIO (RL.benchComputeP rDataCompute)
, bench "copyS" $ whnf RL.benchCopyS rDataCopy
, bench "copyP" $ whnfIO (RL.benchCopyP rDataCopy)
, bench "extractS" $ whnf RL.benchExtractS rDataExtract
, bench "extractP" $ whnfIO (RL.benchExtractP rDataExtract)
, bench "appendS" $ whnf RL.benchAppendS rDataAppend
, bench "appendP" $ whnfIO (RL.benchAppendP rDataAppend)
, bench "backpermuteS" $ whnf RL.benchBckpermS rDataBckperm
, bench "backpermuteP" $ whnfIO (RL.benchBckpermP rDataBckperm)
, bench "mapS" $ whnf RL.benchMapS rDataMap
, bench "mapP" $ whnfIO (RL.benchMapP rDataMap)
, bench "traverseS" $ whnf RL.benchTraverseS rDataTraverse
, bench "traverseP" $ whnfIO (RL.benchTraverseP rDataTraverse)
]
]
benchConfig :: Config
benchConfig = defaultConfig {
cfgPerformGC = ljust True
}
-- Note [C/FFI criterion bug]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- When benchmarking C bindings with criterion, the first benchmark returns a
-- correct result, but all other benchmarks that use the FFI estimate their run
-- time to be longer. This does not always happen and seems to depend on the CPU
-- and on the size of the processed data; these are possibly cache effects. The
-- bug does not occur on some machines. If you observe either of the symptoms
-- below, your results are affected by the bug:
--
-- a) time needed to run IDWT/C1 benchmark is significantly longer than DWT/C1
-- b) C1/Lattice takes longer than Vector1/Lattice
|
jstolarek/lattice-structure-hs
|
bench/MainBenchmarkSuite.hs
|
Haskell
|
bsd-3-clause
| 8,573
|
{- PiForall language, OPLSS, Summer 2013 -}
{-# LANGUAGE TypeSynonymInstances,ExistentialQuantification,FlexibleInstances, UndecidableInstances, FlexibleContexts,
ViewPatterns, DefaultSignatures
#-}
{-# OPTIONS_GHC -Wall -fno-warn-unused-matches -fno-warn-name-shadowing #-}
-- | A Pretty Printer.
module PrettyPrint(Disp(..), D(..)) where
import Syntax
import Unbound.LocallyNameless hiding (empty,Data,Refl)
import Unbound.LocallyNameless.Alpha
import Unbound.LocallyNameless.Ops
import Control.Monad.Identity
import Control.Monad.Reader
import Text.PrettyPrint as PP
import Text.ParserCombinators.Parsec.Pos (SourcePos, sourceName, sourceLine, sourceColumn)
import Text.ParserCombinators.Parsec.Error (ParseError)
import Control.Applicative ((<$>), (<*>))
import qualified Data.Set as S
-- | The 'Disp' class governs types which can be turned into 'Doc's
class Disp d where
disp :: d -> Doc
default disp :: (Display d, Alpha d) => d -> Doc
disp = cleverDisp
-- This function tries to pretty-print terms using the lowest numbers in
-- the names of the variables (i.e. as close as possible to what the user
-- originally wrote).
cleverDisp :: (Display d, Alpha d) => d -> Doc
cleverDisp d =
runIdentity (runReaderT (display d) initDI)
instance Disp Term
instance Rep a => Disp (Name a)
instance Disp Telescope
instance Disp Pattern
instance Disp Match
instance Disp String where
disp = text
instance Disp Int where
disp = text . show
instance Disp Integer where
disp = text . show
instance Disp Double where
disp = text . show
instance Disp Float where
disp = text . show
instance Disp Char where
disp = text . show
instance Disp Bool where
disp = text . show
instance Disp a => Disp (Maybe a) where
disp (Just a) = text "Just" <+> disp a
disp Nothing = text "Nothing"
instance (Disp a, Disp b) => Disp (Either a b) where
disp (Left a) = text "Left" <+> disp a
disp (Right a) = text "Right" <+> disp a
instance Disp ParseError where
disp = text . show
instance Disp SourcePos where
disp p = text (sourceName p) <> colon <> int (sourceLine p) <>
colon <> int (sourceColumn p) <> colon
-- | Error message quoting
data D = DS String -- ^ String literal
| forall a . Disp a => DD a -- ^ Displayable value
instance Disp D where
disp (DS s) = text s
disp (DD d) = nest 2 $ disp d
-- might be a hack to do the nesting here???
instance Disp [D] where
disp dl = sep $ map disp dl
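-- A small illustration (the binding below is documentary only and is not
-- exported): 'D' lets an error message interleave literal strings with any
-- displayable value.
_exampleMessage :: Doc
_exampleMessage = disp [DS "expected", DD (3 :: Int), DS "argument(s)"]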
-------------------------------------------------------------------------
-- Modules and Decls
-------------------------------------------------------------------------
instance Disp Module where
disp m = text "module" <+> disp (moduleName m) <+> text "where" $$
vcat (map disp (moduleImports m)) $$
disp (moduleEntries m)
instance Disp ModuleImport where
disp (ModuleImport i) = text "import" <+> disp i
instance Disp [Decl] where
disp = vcat . map disp
instance Disp Decl where
disp (Def n r@(Ind bnd _)) |
name2String(fst(fst(unsafeUnbind bnd)))==name2String n = disp r
disp (Def n term) = disp n <+> text "=" <+> disp term
disp (Sig n ty) =
disp n <+> text ":" <+> disp ty
disp (Axiom n ty) =
text "axiom"
<+> disp n <+> text ":" <+> disp ty
disp (Data n params lev constructors) =
hang (text "data" <+> disp n <+> disp params
<+> colon <+> text "Type" <+> text (show lev)
<+> text "where")
2 (vcat $ map disp constructors)
instance Disp ConstructorDef where
disp (ConstructorDef _ c Empty) = text c
disp (ConstructorDef _ c tele) = text c <+> text "of" <+> disp tele
-------------------------------------------------------------------------
-- The Display class
-------------------------------------------------------------------------
-- | The data structure for information about the display
--
data DispInfo = DI
{
showAnnots :: Bool, -- ^ should we show the annotations?
dispAvoid :: S.Set AnyName -- ^ names that have been used
}
instance LFresh (Reader DispInfo) where
lfresh nm = do
let s = name2String nm
di <- ask;
return $ head (filter (\x -> AnyName x `S.notMember` (dispAvoid di))
(map (makeName s) [0..]))
getAvoids = dispAvoid <$> ask
avoid names = local upd where
upd di = di { dispAvoid =
(S.fromList names) `S.union` (dispAvoid di) }
-- | An empty 'DispInfo' context
initDI :: DispInfo
initDI = DI False S.empty
type M a = (ReaderT DispInfo Identity) a
-- | The 'Display' class is like the 'Disp' class. It qualifies
-- types that can be turned into 'Doc'. The difference is that the
-- type might need the 'DispInfo' context to control the parameters
-- of pretty-printing
class (Alpha t) => Display t where
-- | Convert a value to a 'Doc'.
display :: t -> M Doc
instance Display String where
display = return . text
instance Display Int where
display = return . text . show
instance Display Integer where
display = return . text . show
instance Display Double where
display = return . text . show
instance Display Float where
display = return . text . show
instance Display Char where
display = return . text . show
instance Display Bool where
display = return . text . show
-------------------------------------------------------------------------
-------------------------------------------------------------------------
bindParens :: Doc -> Doc
bindParens d = d
mandatoryBindParens :: Doc -> Doc
mandatoryBindParens d = parens d
instance Display Annot where
display (Annot Nothing) = return $ empty
display (Annot (Just x)) = do
st <- ask
if (showAnnots st) then
(text ":" <+>) <$> (display x)
else return $ empty
instance Display Term where
display (Var n) = display n
display (isNumeral -> Just i) = display i
display (TCon n args) = do
dn <- display n
dargs <- mapM display args
return $ dn <+> hsep dargs
display (DCon n args annot) = do
dn <- display n
dargs <- mapM display args
dannot <- display annot
return $ dn <+> hsep dargs <+> dannot
display (Type n) = if n == 0 then
return $ text "Type"
else
return $ text "Type" <+> (text $ show n)
display (TySquash t) = do
dt <- display t
return $ text "[|" <+> dt <+> text "|]"
display (Quotient t r) = do
dt <- display t
dr <- display r
return $ dt <+> text "//" <+> dr
display (QBox x (Annot mty)) = do
dx <- display x
case mty of
Nothing -> return $ text "<" <+> dx <+> text ">"
Just ty -> do
dty <- display ty
return $ text "<" <+> dx <+> text ":" <+> dty <+> text ">"
display (QElim p s rsp x) = do
dp <- display p
ds <- display s
drsp <- display rsp
dx <- display x
return $ text "expose" <+> dx <+> text "under" <+> dp <+> text "with" <+> ds <+> text "by" <+> drsp
display (Pi bnd) = do
lunbind bnd $ \((n,a), b) -> do
da <- display (unembed a)
dn <- display n
db <- display b
let lhs = mandatoryBindParens $
if (n `elem` fv b) then
(dn <+> colon <+> da)
else
da
return $ lhs <+> text "->" <+> db
display (PiC bnd) = do
lunbind bnd $ \((n,a), (c, b)) -> do
da <- display (unembed a)
dn <- display n
db <- display b
dc <- display c
let lhs = mandatoryBindParens $
if (n `elem` fv b) then
(dn <+> colon <+> da)
else
da
return $ lhs <+> text "|" <+> dc <+> text "->" <+> db
display a@(Lam b) = do
(binds, body) <- gatherBinders a
return $ hang (sep binds) 2 body
display (Smaller a b) = do
da <- display a
db <- display b
return $ da <+> text "<" <+> db
display (Trivial _) = do
return $ text "trivial"
display (Induction _ xs) = do
return $ text "induction"
display (Refl ann evidence) = do
dev <- display evidence
return $ text "refl" <+> dev
display (Ind binding annot) =
lunbind binding $ \ ((n,x),body) -> do
dn <- display n
-- return dn
dx <- display x
db <- display body
dann <- display annot
return $ text "ind" <+> dn <+> bindParens dx <+> text "="
<+> db <+> dann
display (App f x) = do
df <- display f
dx <- display x
let wrapf f = case f of
Var _ -> id
App _ _ -> id
Pos _ a -> wrapf a
Ann _ _ -> id
TrustMe _ -> id
Hole _ _ -> braces
_ -> parens
return $ wrapf f df <+> dx
display (Pos _ e) = display e
display (Let bnd) = do
lunbind bnd $ \ ((x,a) , b) -> do
da <- display (unembed a)
dx <- display x
db <- display b
return $ sep [text "let" <+> bindParens dx
<+> text "=" <+> da
<+> text "in",
db]
display (Case scrut alts annot) = do
dscrut <- display scrut
dalts <- mapM display alts
dannot <- display annot
return $ text "case" <+> dscrut <+> text "of" $$
(nest 2 $ vcat $ dalts) <+> dannot
display (Subst a b mbnd) = do
da <- display a
db <- display b
dat <- maybe (return (text "")) (\ bnd -> do
lunbind bnd $ \(xs,c) -> do
dxs <- display xs
dc <- display c
return $ text "at" <+> dxs <+> text "." <+> dc) mbnd
return $ fsep [text "subst" <+> da,
text "by" <+> db,
dat]
display (TyEq a b s t) = do
let disp' (x, Annot Nothing) = display x
disp' (x, Annot (Just ty)) = do
dx <- display x
dty <- display ty
return $ dx <+> text ":" <+> dty
da <- disp' (a, s)
db <- disp' (b, t)
return $ da <+> text "=" <+> db
display (Contra ty mty) = do
dty <- display ty
da <- display mty
return $ text "contra" <+> dty <+> da
display (Ann a b) = do
da <- display a
db <- display b
return $ parens (da <+> text ":" <+> db)
display (TrustMe ma) = do
da <- display ma
return $ text "TRUSTME" <+> da
display (Hole n (Annot mTy)) = do
dn <- display n
da <- maybe (return $ text "??") display mTy
return $ text "{" <+> dn <+> text ":" <+> da <+> text "}"
display (Sigma bnd) =
lunbind bnd $ \ ((x,unembed->tyA),tyB) -> do
dx <- display x
dA <- display tyA
dB <- display tyB
return $ text "{" <+> dx <+> text ":" <+> dA
<+> text "|" <+> dB <+> text "}"
display (Prod a b ann) = do
da <- display a
db <- display b
dann <- display ann
return $ parens (da <+> text "," <+> db) <+> dann
display (Pcase a bnd ann) = do
da <- display a
dann <- display ann
lunbind bnd $ \ ((x,y), body) -> do
dx <- display x
dy <- display y
dbody <- display body
return $ text "pcase" <+> da <+> text "of"
<+> text "(" <+> dx <+> text "," <+> dy <+> text ")"
<+> text "->" <+> dbody <+> dann
display (TyUnit) = return $ text "One"
display (TyEmpty) = return $ text "Zero"
display (LitUnit) = return $ text "tt"
instance Display Match where
display (Match bd) =
lunbind bd $ \ (pat, ubd) -> do
dpat <- display pat
dubd <- display ubd
return $ hang (dpat <+> text "->") 2 dubd
instance Display Pattern where
display (PatCon c []) = (display c)
display (PatCon c args) =
parens <$> ((<+>) <$> (display c) <*> (hsep <$> (mapM display args)))
display (PatVar x) = display x
instance Display Telescope where
display Empty = return empty
display (Cons bnd) = goTele bnd
goTele :: (IsEmbed t, Alpha t, Display t1,
Display (Embedded t), Display t2) =>
Rebind (t1, t) t2 -> M Doc
goTele bnd = do
let ((n, unembed->ty), tele) = unrebind bnd
dn <- display n
dty <- display ty
dtele <- display tele
return $ mandatoryBindParens (dn <+> colon <+> dty) <+> dtele
gatherBinders :: Term -> M ([Doc], Doc)
gatherBinders (Lam b) =
lunbind b $ \((n,unembed->ma), body) -> do
dn <- display n
dt <- display ma
(rest, body) <- gatherBinders body
return $ (text "\\" <> bindParens (dn <+> dt) <+> text "." : rest, body)
gatherBinders (Ind binding ann) =
lunbind binding $ \ ((n,x),body) -> do
dn <- display n
dx <- display x
(rest,body) <- gatherBinders body
return (text "ind" <+> dn <+> bindParens dx <+> text "=" : rest,
body)
gatherBinders body = do
db <- display body
return ([], db)
-- Assumes that all terms were opened safely earlier.
instance Rep a => Display (Name a) where
display n = return $ (text . name2String) n
instance Disp [Term] where
disp = vcat . map disp
instance Disp [(Name Term,Term)] where
disp = vcat . map disp
instance Disp (TName,Term) where
disp (n,t) = parens $ (disp n) <> comma <+> disp t
|
jonsterling/Luitzen
|
src/PrettyPrint.hs
|
Haskell
|
bsd-3-clause
| 13,162
|
-- |
-- Module : BenchmarkOps
-- Copyright : (c) 2018 Harendra Kumar
--
-- License : MIT
-- Maintainer : streamly@composewell.com
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
module NestedOps where
import Control.Exception (try)
import GHC.Exception (ErrorCall)
import qualified Streamly as S hiding (runStream)
import qualified Streamly.Prelude as S
linearCount :: Int
linearCount = 100000
-- double nested loop
nestedCount2 :: Int
-- nestedCount2 = round (fromIntegral linearCount**(1/2::Double))
nestedCount2 = 100
-- triple nested loop
nestedCount3 :: Int
nestedCount3 = round (fromIntegral linearCount**(1/3::Double))
-------------------------------------------------------------------------------
-- Stream generation and elimination
-------------------------------------------------------------------------------
type Stream m a = S.SerialT m a
{-# INLINE source #-}
source :: (S.MonadAsync m, S.IsStream t) => Int -> Int -> t m Int
source = sourceUnfoldrM
{-# INLINE sourceUnfoldrM #-}
sourceUnfoldrM :: (S.IsStream t, S.MonadAsync m) => Int -> Int -> t m Int
sourceUnfoldrM n value = S.serially $ S.unfoldrM step n
where
step cnt =
if cnt > n + value
then return Nothing
else return (Just (cnt, cnt + 1))
{-# INLINE sourceUnfoldr #-}
sourceUnfoldr :: (Monad m, S.IsStream t) => Int -> Int -> t m Int
sourceUnfoldr start n = S.unfoldr step start
where
step cnt =
if cnt > start + n
then Nothing
else Just (cnt, cnt + 1)
{-# INLINE runStream #-}
runStream :: Monad m => Stream m a -> m ()
runStream = S.drain
{-# INLINE runToList #-}
runToList :: Monad m => Stream m a -> m [a]
runToList = S.toList
-------------------------------------------------------------------------------
-- Benchmark ops
-------------------------------------------------------------------------------
{-# INLINE toNullAp #-}
toNullAp
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m ()
toNullAp t start = runStream . t $
(+) <$> source start nestedCount2 <*> source start nestedCount2
{-# INLINE toNull #-}
toNull
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m ()
toNull t start = runStream . t $ do
x <- source start nestedCount2
y <- source start nestedCount2
return $ x + y
{-# INLINE toNull3 #-}
toNull3
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m ()
toNull3 t start = runStream . t $ do
x <- source start nestedCount3
y <- source start nestedCount3
z <- source start nestedCount3
return $ x + y + z
{-# INLINE toList #-}
toList
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m [Int]
toList t start = runToList . t $ do
x <- source start nestedCount2
y <- source start nestedCount2
return $ x + y
{-# INLINE toListSome #-}
toListSome
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m [Int]
toListSome t start =
runToList . t $ S.take 1000 $ do
x <- source start nestedCount2
y <- source start nestedCount2
return $ x + y
{-# INLINE filterAllOut #-}
filterAllOut
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m ()
filterAllOut t start = runStream . t $ do
x <- source start nestedCount2
y <- source start nestedCount2
let s = x + y
if s < 0
then return s
else S.nil
{-# INLINE filterAllIn #-}
filterAllIn
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m ()
filterAllIn t start = runStream . t $ do
x <- source start nestedCount2
y <- source start nestedCount2
let s = x + y
if s > 0
then return s
else S.nil
{-# INLINE filterSome #-}
filterSome
:: (S.IsStream t, S.MonadAsync m, Monad (t m))
=> (t m Int -> S.SerialT m Int) -> Int -> m ()
filterSome t start = runStream . t $ do
x <- source start nestedCount2
y <- source start nestedCount2
let s = x + y
if s > 1100000
then return s
else S.nil
{-# INLINE breakAfterSome #-}
breakAfterSome
:: (S.IsStream t, Monad (t IO))
=> (t IO Int -> S.SerialT IO Int) -> Int -> IO ()
breakAfterSome t start = do
(_ :: Either ErrorCall ()) <- try $ runStream . t $ do
x <- source start nestedCount2
y <- source start nestedCount2
let s = x + y
if s > 1100000
then error "break"
else return s
return ()
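-- Usage sketch (added for illustration; not part of the original benchmark
-- module): run the serial variant of one nested benchmark op directly in IO.
exampleToNullSerial :: IO ()
exampleToNullSerial = toNull (S.serially :: S.SerialT IO Int -> S.SerialT IO Int) 0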
|
harendra-kumar/asyncly
|
benchmark/NestedOps.hs
|
Haskell
|
bsd-3-clause
| 4,633
|
{-# LANGUAGE OverloadedStrings #-}
-- reference to L.isPrefixOf
-- see Data.List (delete, deleteBy)
module Data.Carbonara.LazyByteString where
import qualified Data.ByteString.Char8 as S (singleton)
import qualified Data.ByteString.Internal as S (w2c,c2w)
import Data.Int (Int64)
import qualified Data.ByteString.Lazy.Internal as L (ByteString(Chunk,Empty))
import qualified Data.ByteString.Lazy.Char8 as L (ByteString, any, append, cons, drop
, dropWhile, filter, isPrefixOf, pack
, singleton, snoc, splitWith, take, takeWhile)
-- | Delete the first occurrence of a character from a lazy ByteString
-- (analogous to Data.List.delete).
delete :: Char -> L.ByteString -> L.ByteString
delete _ L.Empty = L.Empty
delete c s = L.takeWhile (/= c) s `L.append` L.drop 1 (L.dropWhile (/= c) s)
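-- Usage sketch (added for illustration; not part of the original module):
exampleDelete :: L.ByteString
exampleDelete = delete 'b' (L.pack "abc")   -- == L.pack "ac"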
|
szehk/Haskell-Carbonara-Library
|
src/Data/LazyByteString.hs
|
Haskell
|
bsd-3-clause
| 757
|
{-# LANGUAGE OverloadedStrings, FlexibleContexts, PackageImports #-}
module Network.XMPiPe.Core.S2S.Client (
-- * Types and Values
Mpi(..), Jid(..), Tags(..), tagsNull, tagsType,
-- * Functions
starttls, sasl, begin, input, output,
) where
import "monads-tf" Control.Monad.State
import "monads-tf" Control.Monad.Error
import Data.Pipe
import Text.XML.Pipe
import qualified Data.ByteString as BS
import SaslClient hiding (sasl)
import qualified SaslClient as S
import Xmpp hiding (input, output)
input :: Monad m => [Xmlns] -> Pipe BS.ByteString Mpi m ()
input = inputMpi
output :: Monad m => Pipe Mpi BS.ByteString m ()
output = outputMpi
starttls :: Monad m =>
BS.ByteString -> BS.ByteString -> Pipe BS.ByteString BS.ByteString m ()
starttls fr to = inputP3 =$= processTls fr to =$= outputS
processTls :: Monad m => BS.ByteString -> BS.ByteString -> Pipe Xmpp Xmpp m ()
processTls fr to = do
yield XCDecl
yield $ XCBegin [(From, fr), (To, to), (TagRaw $ nullQ "version", "1.0")]
procTls
procTls :: Monad m => Pipe Xmpp Xmpp m ()
procTls = await >>= \mx -> case mx of
Just (XCBegin _as) -> procTls
Just (XCFeatures [FtStarttls _]) -> do
yield XCStarttls
procTls
Just XCProceed -> return ()
Just _ -> return ()
_ -> return ()
sasl :: (
MonadState m, SaslState (StateType m),
MonadError m, Error (ErrorType m) ) =>
BS.ByteString -> BS.ByteString -> Pipe BS.ByteString BS.ByteString m ()
sasl fr to = inputP3 =$= processSasl fr to =$= outputS
processSasl :: (
MonadState m, SaslState (StateType m),
MonadError m, Error (ErrorType m) ) =>
BS.ByteString -> BS.ByteString -> Pipe Xmpp Xmpp m ()
processSasl fr to = do
yield XCDecl
yield $ XCBegin [ (From, fr), (To, to), (TagRaw $ nullQ "version", "1.0")]
procSasl
procSasl :: (
MonadState m, SaslState (StateType m),
MonadError m, Error (ErrorType m)
) => Pipe Xmpp Xmpp m ()
procSasl = await >>= \mx -> case mx of
Just (XCBegin _as) -> procSasl
Just (XCFeatures [FtMechanisms ["EXTERNAL"]]) -> do
st <- lift $ gets getSaslState
lift . modify . putSaslState $ ("username", "") : st
S.sasl "EXTERNAL"
lift . modify $ putSaslState st
_ -> return ()
begin :: Monad m =>
BS.ByteString -> BS.ByteString -> Pipe BS.ByteString BS.ByteString m [Xmlns]
begin fr to = inputFeature =@= process fr to =$= outputS
process :: Monad m => BS.ByteString -> BS.ByteString -> Pipe Xmpp Xmpp m ()
process fr to = do
yield XCDecl
yield $ XCBegin [(From, fr), (To, to), (TagRaw $ nullQ "version", "1.0")]
Just (XCBegin _as) <- await
Just (XCFeatures []) <- await
_ <- await
return ()
|
YoshikuniJujo/xmpipe
|
core/Network/XMPiPe/Core/S2S/Client.hs
|
Haskell
|
bsd-3-clause
| 2,569
|
module PyHint.Message (
Message(..),
) where
import Language.Py.SrcLocation (SrcSpan)
data Message = Message String String SrcSpan deriving (Show)
|
codeq/pyhint
|
src/PyHint/Message.hs
|
Haskell
|
bsd-3-clause
| 151
|
{-# LANGUAGE PackageImports #-}
import "monads-tf" Control.Monad.Trans
import Control.Applicative
import Data.Conduit
import qualified Data.Conduit.List as CL
import Data.Conduit.Lazy
import Data.Time
times :: Int -> ConduitM i UTCTime IO ()
times 0 = return ()
times n = lift getCurrentTime >>= yield >> times (n - 1)
|
YoshikuniJujo/simple-pipe
|
try/testConduitLazy.hs
|
Haskell
|
bsd-3-clause
| 321
|
module Text.Highlighter.Lexers.Modelica (lexer) where
import qualified Text.Highlighter.Lexers.Html as Html
import Text.Regex.PCRE.Light
import Text.Highlighter.Types
lexer :: Lexer
lexer = Lexer
{ lName = "Modelica"
, lAliases = ["modelica"]
, lExtensions = [".mo"]
, lMimetypes = ["text/x-modelica"]
, lStart = root'
, lFlags = [caseless, dotall]
}
functions' :: TokenMatcher
functions' =
[ tok "(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|cross|div|exp|floor|log|log10|mod|rem|sign|sin|sinh|size|sqrt|tan|tanh|zeros)\\b" (Arbitrary "Name" :. Arbitrary "Function")
]
classes' :: TokenMatcher
classes' =
[ tok "(block|class|connector|function|model|package|record|type)\\b" (Arbitrary "Name" :. Arbitrary "Class")
]
statements' :: TokenMatcher
statements' =
[ tokNext "\"" (Arbitrary "Literal" :. Arbitrary "String") (GoTo string')
, tok "(\\d+\\.\\d*|\\.\\d+|\\d+|\\d.)[eE][+-]?\\d+[lL]?" (Arbitrary "Literal" :. Arbitrary "Number" :. Arbitrary "Float")
, tok "(\\d+\\.\\d*|\\.\\d+)" (Arbitrary "Literal" :. Arbitrary "Number" :. Arbitrary "Float")
, tok "\\d+[Ll]?" (Arbitrary "Literal" :. Arbitrary "Number" :. Arbitrary "Integer")
, tok "[\126!%^&*+=|?:<>/-]" (Arbitrary "Operator")
, tok "[()\\[\\]{},.;]" (Arbitrary "Punctuation")
, tok "(true|false|NULL|Real|Integer|Boolean)\\b" (Arbitrary "Name" :. Arbitrary "Builtin")
, tok "([a-zA-Z_][\\w]*|'[a-zA-Z_\\+\\-\\*\\/\\^][\\w]*')(\\.([a-zA-Z_][\\w]*|'[a-zA-Z_\\+\\-\\*\\/\\^][\\w]*'))+" (Arbitrary "Name" :. Arbitrary "Class")
, tok "('[\\w\\+\\-\\*\\/\\^]+'|\\w+)" (Arbitrary "Name")
]
whitespace' :: TokenMatcher
whitespace' =
[ tok "\\n" (Arbitrary "Text")
, tok "\\s+" (Arbitrary "Text")
, tok "\\\\\\n" (Arbitrary "Text")
, tok "//(\\n|(.|\\n)*?[^\\\\]\\n)" (Arbitrary "Comment")
, tok "/(\\\\\\n)?[*](.|\\n)*?[*](\\\\\\n)?/" (Arbitrary "Comment")
]
htmlContent' :: TokenMatcher
htmlContent' =
[ tokNext "<\\s*/\\s*html\\s*>" (Arbitrary "Name" :. Arbitrary "Tag") Pop
, tok ".+?(?=<\\s*/\\s*html\\s*>)" (Using Html.lexer)
]
keywords' :: TokenMatcher
keywords' =
[ tok "(algorithm|annotation|break|connect|constant|constrainedby|discrete|each|else|elseif|elsewhen|encapsulated|enumeration|end|equation|exit|expandable|extends|external|false|final|flow|for|if|import|in|inner|input|loop|nondiscrete|outer|output|parameter|partial|protected|public|redeclare|replaceable|stream|time|then|true|when|while|within)\\b" (Arbitrary "Keyword")
]
operators' :: TokenMatcher
operators' =
[ tok "(and|assert|cardinality|change|delay|der|edge|initial|noEvent|not|or|pre|reinit|return|sample|smooth|terminal|terminate)\\b" (Arbitrary "Name" :. Arbitrary "Builtin")
]
root' :: TokenMatcher
root' =
[ anyOf whitespace'
, anyOf keywords'
, anyOf functions'
, anyOf operators'
, anyOf classes'
, tokNext "(\"<html>|<html>)" (Arbitrary "Name" :. Arbitrary "Tag") (GoTo htmlContent')
, anyOf statements'
]
string' :: TokenMatcher
string' =
[ tokNext "\"" (Arbitrary "Literal" :. Arbitrary "String") Pop
, tok "\\\\([\\\\abfnrtv\"\\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})" (Arbitrary "Literal" :. Arbitrary "String" :. Arbitrary "Escape")
, tok "[^\\\\\"\\n]+" (Arbitrary "Literal" :. Arbitrary "String")
, tok "\\\\\\n" (Arbitrary "Literal" :. Arbitrary "String")
, tok "\\\\" (Arbitrary "Literal" :. Arbitrary "String")
]
|
chemist/highlighter
|
src/Text/Highlighter/Lexers/Modelica.hs
|
Haskell
|
bsd-3-clause
| 3,475
|
module Spec.Tag where
data Tag = Tag{ tName :: String
, tAuthor :: String
, tContact :: String
}
deriving(Show)
|
oldmanmike/vulkan
|
generate/src/Spec/Tag.hs
|
Haskell
|
bsd-3-clause
| 163
|
-- |
-- Module: Scheduling
-- Description: Rule scheduling
-- Copyright: (c) 2013 Tom Hawkins & Lee Pike
--
-- Algorithms for scheduling rules in Atom
module Language.Atom.Scheduling
( schedule
, Schedule
, reportSchedule
) where
import Text.Printf
import Data.List
import Language.Atom.Analysis
import Language.Atom.Elaboration
import Language.Atom.UeMap
-- | Schedule expressed as a 'UeMap' and a list of (period, phase, rules).
type Schedule = (UeMap, [(Int, Int, [Rule])])
schedule :: [Rule] -> UeMap -> Schedule
schedule rules' mp = (mp, concatMap spread periods)
where
rules = [ r | r@(Rule _ _ _ _ _ _ _ _) <- rules' ]
-- Algorithm for assigning rules to phases for a given period
-- (assuming they aren't given an exact phase):
-- 1. List the rules by their offsets, highest first.
-- 2. If the list is empty, stop.
-- 3. Otherwise, take the head of the list and assign its phase as follows:
-- find the set of phases containing the minimum number of rules such that
-- they are at least as large as the rule's offset. Then take the smallest
-- of those phases.
-- 4. Go to (2).
-- Algorithm properties: for each period,
-- A. Each rule is scheduled no earlier than its offset.
-- B. The phase with the most rules is the minimum of all possible schedules
-- that satisfy (A).
-- XXX Check if this is true.
  -- C. The sum of the differences between each rule's offset and its
  -- scheduled phase is the minimum over all schedules satisfying (A) and (B).
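  -- Worked example (illustrative, not from the original source):
  -- period = 4, no exact-phase rules, min-phase offsets 3, 1, 0, 0
  -- (already ordered highest first):
  --   offset 3: phases >= 3 are {3}, all empty           -> phase 3
  --   offset 1: phases >= 1 are {1,2,3}, counts {0,0,1}  -> phase 1
  --   offset 0: phase counts are now {0,1,0,1}           -> phase 0
  --   offset 0: phase counts are now {1,1,0,1}           -> phase 2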
spread :: (Int, [Rule]) -> [(Int, Int, [Rule])]
spread (period, rules_) =
placeRules (placeExactRules (replicate period []) exactRules)
orderedByPhase
where
(minRules,exactRules) = partition (\r -> case rulePhase r of
MinPhase _ -> True
ExactPhase _ -> False) rules_
placeExactRules :: [[Rule]] -> [Rule] -> [[Rule]]
placeExactRules ls [] = ls
placeExactRules ls (r:rst) = placeExactRules (insertAt (getPh r) r ls)
rst
orderedByPhase :: [Rule]
orderedByPhase = sortBy (\r0 r1 -> compare (getPh r1) (getPh r0)) minRules
getPh r = case rulePhase r of
MinPhase i -> i
ExactPhase i -> i
-- Initially, ls contains all the exactPhase rules. We put rules in those
-- lists according to the algorithm, and then filter out the phase-lists
-- with no rules.
placeRules :: [[Rule]] -> [Rule] -> [(Int, Int, [Rule])]
placeRules ls [] = filter (\(_,_,rls) -> not (null rls))
(zip3 (repeat period) [0..(period-1)] ls)
placeRules ls (r:rst) = placeRules (insertAt (lub r ls) r ls) rst
lub :: Rule -> [[Rule]] -> Int
lub r ls = let minI = getPh r
lub' i [] = i -- unreachable. Included to prevent missing
-- cases ghc warnings.
lub' i ls_ | (head ls_) == minimum ls_ = i
| otherwise = lub' (i+1) (tail ls_)
in lub' minI (drop minI $ map length ls)
-- Cons rule r onto the list at index i in ls.
insertAt :: Int -> Rule -> [[Rule]] -> [[Rule]]
insertAt i r ls = (take i ls) ++ ((r:(ls !! i)):(drop (i+1) ls))
periods = foldl grow [] [ (rulePeriod r, r) | r <- rules ]
grow :: [(Int, [Rule])] -> (Int, Rule) -> [(Int, [Rule])]
grow [] (a, b) = [(a, [b])]
grow ((a, bs):rest) (a', b) | a' == a = (a, b : bs) : rest
| otherwise = (a, bs) : grow rest (a', b)
-- | Generate a rule scheduling report for the given schedule.
reportSchedule :: Schedule -> String
reportSchedule (mp, schedule_) = concat
[ "Rule Scheduling Report\n\n"
, "Period Phase Exprs Rule\n"
, "------ ----- ----- ----\n"
, concatMap (reportPeriod mp) schedule_
, " -----\n"
, printf " %5i\n" $ sum $ map (ruleComplexity mp) rules
, "\n"
, "Hierarchical Expression Count\n\n"
, " Total Local Rule\n"
, " ------ ------ ----\n"
, reportUsage "" $ usage mp rules
, "\n"
]
where
rules = concat $ [ r | (_, _, r) <- schedule_ ]
reportPeriod :: UeMap -> (Int, Int, [Rule]) -> String
reportPeriod mp (period, phase, rules) = concatMap reportRule rules
where
reportRule :: Rule -> String
reportRule rule = printf "%6i %5i %5i %s\n" period phase (ruleComplexity mp rule) (show rule)
data Usage = Usage String Int [Usage] deriving Eq
instance Ord Usage where compare (Usage a _ _) (Usage b _ _) = compare a b
reportUsage :: String -> Usage -> String
reportUsage i node@(Usage name n subs) = printf " %6i %6i %s\n" (totalComplexity node) n (i ++ name) ++ concatMap (reportUsage (" " ++ i)) subs
totalComplexity :: Usage -> Int
totalComplexity (Usage _ n subs) = n + sum (map totalComplexity subs)
usage :: UeMap -> [Rule] -> Usage
usage mp = head . foldl insertUsage [] . map (usage' mp)
usage' :: UeMap -> Rule -> Usage
usage' mp rule = f $ split $ ruleName rule
where
f :: [String] -> Usage
f [] = undefined
f [name] = Usage name (ruleComplexity mp rule) []
f (name:names) = Usage name 0 [f names]
split :: String -> [String]
split "" = []
split s = a : if null b then [] else split (tail b) where (a,b) = span (/= '.') s
insertUsage :: [Usage] -> Usage -> [Usage]
insertUsage [] u = [u]
insertUsage (a@(Usage n1 i1 s1) : rest) b@(Usage n2 i2 s2) | n1 == n2 = Usage n1 (max i1 i2) (sort $ foldl insertUsage s1 s2) : rest
| otherwise = a : insertUsage rest b
|
Copilot-Language/atom_for_copilot
|
Language/Atom/Scheduling.hs
|
Haskell
|
bsd-3-clause
| 5,686
|
-- munt - cryptographic function composition
import qualified Options.Applicative as Opts
import qualified Options.Applicative.Help.Chunk as OAHC
import qualified System.IO as IO
import qualified System.Process as Proc
import Data.List (intercalate)
import Options.Applicative ((<>))
import Text.Printf (printf)
import Munt.Types
import qualified Munt.App as App
readCliOpts :: IO Options
readCliOpts =
Opts.execParser $ Opts.info (Opts.helper <*> cliOpts)
( Opts.fullDesc
<> Opts.header "munt - cryptographic function composition"
<> Opts.progDesc "Transform input with cryptographic functions."
<> Opts.footerDoc (OAHC.unChunk (OAHC.stringChunk fnDoc)) )
where
cliOpts = Options
<$> Opts.argument Opts.str
( Opts.metavar "[ sources => ] action [ -> action -> ... ]"
<> Opts.value ""
<> Opts.help "Function expression to use for transformation."
)
<*> Opts.switch
( Opts.long "test"
<> Opts.short 't'
<> Opts.help "Run tests." )
ind = 10
fnDoc = printf "Available functions:\n%s" $ intercalate "" sections
list t xs = printf " %-7s %s\n" t $ drop ind (indentedList ind 80 xs)
sections = map (uncurry list)
[ ("Encode", ["b64e", "b64d"])
, ("Format", ["bin", "dec", "hex", "unbin", "undec", "unhex"])
, ("Math", ["+", "-", "*", "/", "%", "^"])
, ("Bitwise",["and", "or", "xor", "not", "rsh", "lsh"])
, ("Util", ["id", "trace"])
, ("List", ["append", "drop", "head", "init", "last", "len", "prepend",
"reverse", "tail", "take"])
, ("Stream", ["after", "before", "bytes", "concat", "consume", "count",
"dup", "filter", "flip", "lines", "repeat", "unlines",
"unwords", "words"])
, ("Cipher", ["aes128d", "aes128e", "aes192d", "aes192e", "aes256d",
"aes256e", "bfe", "bfd", "bf64e", "bf64d", "bf128e",
"bf128d", "bf256e", "bf256d", "bf448e", "bf448d", "dese",
"desd", "deseee3e", "deseee3d", "desede3e", "desede3d",
"deseee2e", "deseee2d", "desede2e", "desede2d", "cam128e",
"cam128d"])
, ("Hash", ["blake2s256", "blake2s224", "blake2sp256", "blake2sp224",
"blake2b512", "blake2bp512", "md2", "md4", "md5", "sha1",
"sha224", "sha256", "sha384", "sha512", "sha512t256",
"sha512t224", "sha3512", "sha3384", "sha3256", "sha3224",
"keccak512", "keccak384", "keccak256", "keccak224",
"ripemd160", "skein256256", "skein256224", "skein512512",
"skein512384", "skein512256", "skein512224", "whirlpool"])
]
-- | Display a list of strings indented and wrapped to fit the given width
indentedList :: Int -> Int -> [String] -> String
indentedList indentBy width =
intercalate "\n" . reverse. foldl addTerm [indent]
where addTerm (r:rs) x =
let r' = printf "%s %s" r x
in if length r' < width then (r':rs)
else addTerm (indent:r:rs) x
indent = take indentBy $ repeat ' '
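-- Example (illustrative, not from the original source):
--   indentedList 2 10 ["foo","bar","baz"]
-- yields "   foo\n   bar\n   baz": each term is prefixed by the indent plus a
-- separating space, and a new output line is started whenever adding a term
-- would reach the given width.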
main :: IO ()
main = readCliOpts >>= \o ->
let
expression = optExpression o
testMode = optRunTests o
in do
IO.hSetBuffering IO.stdin IO.NoBuffering
IO.hSetBuffering IO.stdout IO.NoBuffering
App.evaluate expression IO.stdin IO.stdout
putStrLn ""
|
shmookey/bc-tools
|
src/munt.hs
|
Haskell
|
bsd-3-clause
| 3,474
|
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE TypeOperators #-}
module Evaluator.Types where
import Protolude
import Evaluator.BuiltIns (builtIns)
import Evaluator.Object
import Parser.AST (Ident)
import Control.Monad.Trans.Class (MonadTrans(..))
newtype EvalError = EvalError Text
deriving (Show, Eq, Typeable)
instance Exception EvalError
newtype EvalState = EvalState EnvRef
getEnvRef :: Monad m => EvaluatorT m EnvRef
getEnvRef = do
EvalState ref <- get
return ref
setEnvRef :: Monad m => EnvRef -> EvaluatorT m ()
setEnvRef ref = put $ EvalState ref
createEmptyState :: IO EvalState
createEmptyState = EvalState <$> (emptyEnv >>= flip wrapEnv builtIns)
newtype EvaluatorT m a = EvaluatorT
{ runEvaluatorT :: StateT EvalState (ExceptT EvalError m) a }
instance Functor m => Functor (EvaluatorT m) where
fmap f (EvaluatorT e) = EvaluatorT $ fmap f e
instance Monad m => Applicative (EvaluatorT m) where
pure = EvaluatorT . pure
EvaluatorT mf <*> EvaluatorT ma = EvaluatorT $ mf <*> ma
instance Monad m => Monad (EvaluatorT m) where
EvaluatorT ma >>= f = EvaluatorT $ ma >>= runEvaluatorT . f
instance Monad m => MonadState EvalState (EvaluatorT m) where
get = EvaluatorT get
put = EvaluatorT . put
instance Monad m => MonadError EvalError (EvaluatorT m) where
throwError = EvaluatorT . throwError
EvaluatorT e `catchError` f = EvaluatorT $ e `catchError` (runEvaluatorT . f)
instance MonadTrans EvaluatorT where
lift = EvaluatorT . lift . lift
type Evaluator = EvaluatorT IO
execEvaluatorT :: Monad m => EvaluatorT m a -> EvalState -> m (Either EvalError (a, EvalState))
execEvaluatorT = (runExceptT .) . runStateT . runEvaluatorT
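-- Usage sketch (added for illustration; not part of the original module):
-- run an evaluator action against a freshly created environment.
runWithFreshState :: Evaluator a -> IO (Either EvalError (a, EvalState))
runWithFreshState act = createEmptyState >>= execEvaluatorT act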
|
noraesae/monkey-hs
|
lib/Evaluator/Types.hs
|
Haskell
|
bsd-3-clause
| 1,702
|
{-# LANGUAGE DeriveDataTypeable, DeriveGeneric #-}
-- |
-- Module : Statistics.Distribution.ChiSquared
-- Copyright : (c) 2010 Alexey Khudyakov
-- License : BSD3
--
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : portable
--
-- The chi-squared distribution. This is a continuous probability
-- distribution of the sum of squares of k independent standard normal
-- variables. It's commonly used in statistical tests.
module Statistics.Distribution.ChiSquared (
ChiSquared
-- Constructors
, chiSquared
, chiSquaredNDF
) where
import Data.Aeson (FromJSON, ToJSON)
import Data.Binary (Binary)
import Data.Data (Data, Typeable)
import GHC.Generics (Generic)
import Numeric.SpecFunctions (
incompleteGamma,invIncompleteGamma,logGamma,digamma)
import qualified Statistics.Distribution as D
import qualified System.Random.MWC.Distributions as MWC
import Data.Binary (put, get)
-- | Chi-squared distribution
newtype ChiSquared = ChiSquared Int
deriving (Eq, Read, Show, Typeable, Data, Generic)
instance FromJSON ChiSquared
instance ToJSON ChiSquared
instance Binary ChiSquared where
get = fmap ChiSquared get
put (ChiSquared x) = put x
-- | Get number of degrees of freedom
chiSquaredNDF :: ChiSquared -> Int
chiSquaredNDF (ChiSquared ndf) = ndf
-- | Construct chi-squared distribution. Number of degrees of freedom
-- must be positive.
chiSquared :: Int -> ChiSquared
chiSquared n
| n <= 0 = error $
"Statistics.Distribution.ChiSquared.chiSquared: N.D.F. must be positive. Got " ++ show n
| otherwise = ChiSquared n
instance D.Distribution ChiSquared where
cumulative = cumulative
instance D.ContDistr ChiSquared where
density = density
quantile = quantile
instance D.Mean ChiSquared where
mean (ChiSquared ndf) = fromIntegral ndf
instance D.Variance ChiSquared where
variance (ChiSquared ndf) = fromIntegral (2*ndf)
instance D.MaybeMean ChiSquared where
maybeMean = Just . D.mean
instance D.MaybeVariance ChiSquared where
maybeStdDev = Just . D.stdDev
maybeVariance = Just . D.variance
instance D.Entropy ChiSquared where
entropy (ChiSquared ndf) =
let kHalf = 0.5 * fromIntegral ndf in
kHalf
+ log 2
+ logGamma kHalf
+ (1-kHalf) * digamma kHalf
instance D.MaybeEntropy ChiSquared where
maybeEntropy = Just . D.entropy
instance D.ContGen ChiSquared where
genContVar (ChiSquared n) = MWC.chiSquare n
cumulative :: ChiSquared -> Double -> Double
cumulative chi x
| x <= 0 = 0
| otherwise = incompleteGamma (ndf/2) (x/2)
where
ndf = fromIntegral $ chiSquaredNDF chi
density :: ChiSquared -> Double -> Double
density chi x
| x <= 0 = 0
| otherwise = exp $ log x * (ndf2 - 1) - x2 - logGamma ndf2 - log 2 * ndf2
where
ndf = fromIntegral $ chiSquaredNDF chi
ndf2 = ndf/2
x2 = x/2
quantile :: ChiSquared -> Double -> Double
quantile (ChiSquared ndf) p
| p == 0 = 0
| p == 1 = 1/0
| p > 0 && p < 1 = 2 * invIncompleteGamma (fromIntegral ndf / 2) p
| otherwise =
error $ "Statistics.Distribution.ChiSquared.quantile: p must be in [0,1] range. Got: "++show p
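-- Usage sketch (added for illustration; not part of the original module):
-- P(X <= 3) for a chi-squared distribution with 5 degrees of freedom.
exampleCdf :: Double
exampleCdf = D.cumulative (chiSquared 5) 3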
|
fpco/statistics
|
Statistics/Distribution/ChiSquared.hs
|
Haskell
|
bsd-2-clause
| 3,226
|
{-# LANGUAGE TupleSections #-}
import CoreSyn
import CoreUtils
import Id
import Type
import MkCore
import CallArity (callArityRHS)
import MkId
import SysTools
import DynFlags
import ErrUtils
import Outputable
import TysWiredIn
import Literal
import GHC
import Control.Monad
import Control.Monad.IO.Class
import System.Environment( getArgs )
import VarSet
import PprCore
import Unique
import UniqFM
import CoreLint
import FastString
-- Build Ids with mkSysLocal and mkBuiltinUnique: more predictable than proper uniques
go, go2, x, d, n, y, z, scrutf, scruta, f :: Id
[go, go2, x, d, n, y, z, scrutf, scruta, f] = mkTestIds
(words "go go2 x d n y z scrutf scruta f")
[ mkFunTys [intTy, intTy] intTy
, mkFunTys [intTy, intTy] intTy
, intTy
, mkFunTys [intTy] intTy
, mkFunTys [intTy] intTy
, intTy
, intTy
, mkFunTys [boolTy] boolTy
, boolTy
    , mkFunTys [intTy, intTy] intTy -- prototypical external function
]
exprs :: [(String, CoreExpr)]
exprs =
[ ("go2",) $
mkRFun go [x]
(mkLet d (mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y)
) $ mkLams [z] $ Var d `mkVarApps` [x]) $
go `mkLApps` [0, 0]
, ("nested_go2",) $
mkRFun go [x]
(mkLet n (mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y)) $
mkACase (Var n) $
mkFun go2 [y]
(mkLet d
(mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y) ) $
mkLams [z] $ Var d `mkVarApps` [x] )$
Var go2 `mkApps` [mkLit 1] ) $
go `mkLApps` [0, 0]
, ("d0 (go 2 would be bad)",) $
mkRFun go [x]
(mkLet d (mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y)
) $
mkLams [z] $ Var f `mkApps` [ Var d `mkVarApps` [x], Var d `mkVarApps` [x] ]) $
go `mkLApps` [0, 0]
  , ("go2 (in case scrut)",) $
mkRFun go [x]
(mkLet d (mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y)
) $ mkLams [z] $ Var d `mkVarApps` [x]) $
Case (go `mkLApps` [0, 0]) z intTy
[(DEFAULT, [], Var f `mkVarApps` [z,z])]
, ("go2 (in function call)",) $
mkRFun go [x]
(mkLet d (mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y)
) $ mkLams [z] $ Var d `mkVarApps` [x]) $
f `mkLApps` [0] `mkApps` [go `mkLApps` [0, 0]]
, ("go2 (using surrounding interesting let)",) $
mkLet n (f `mkLApps` [0]) $
mkRFun go [x]
(mkLet d (mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y)
) $ mkLams [z] $ Var d `mkVarApps` [x]) $
Var f `mkApps` [n `mkLApps` [0], go `mkLApps` [0, 0]]
, ("go2 (using surrounding boring let)",) $
mkLet z (mkLit 0) $
mkRFun go [x]
(mkLet d (mkACase (Var go `mkVarApps` [x])
(mkLams [y] $ Var y)
) $ mkLams [z] $ Var d `mkVarApps` [x]) $
Var f `mkApps` [Var z, go `mkLApps` [0, 0]]
, ("two calls, one from let and from body (d 1 would be bad)",) $
mkLet d (mkACase (mkLams [y] $ mkLit 0) (mkLams [y] $ mkLit 0)) $
mkFun go [x,y] (mkVarApps (Var d) [x]) $
mkApps (Var d) [mkLApps go [1,2]]
, ("a thunk in a recursion (d 1 would be bad)",) $
mkRLet n (mkACase (mkLams [y] $ mkLit 0) (Var n)) $
mkRLet d (mkACase (mkLams [y] $ mkLit 0) (Var d)) $
Var n `mkApps` [d `mkLApps` [0]]
, ("two thunks, one called multiple times (both arity 1 would be bad!)",) $
mkLet n (mkACase (mkLams [y] $ mkLit 0) (f `mkLApps` [0])) $
mkLet d (mkACase (mkLams [y] $ mkLit 0) (f `mkLApps` [0])) $
Var n `mkApps` [Var d `mkApps` [Var d `mkApps` [mkLit 0]]]
, ("two functions, not thunks",) $
mkLet go (mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var f `mkVarApps` [x]))) $
mkLet go2 (mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var f `mkVarApps` [x]))) $
Var go `mkApps` [go2 `mkLApps` [0,1], mkLit 0]
, ("a thunk, called multiple times via a forking recursion (d 1 would be bad!)",) $
mkLet d (mkACase (mkLams [y] $ mkLit 0) (f `mkLApps` [0])) $
mkRLet go2 (mkLams [x] (mkACase (Var go2 `mkApps` [Var go2 `mkApps` [mkLit 0, mkLit 0]]) (Var d))) $
go2 `mkLApps` [0,1]
, ("a function, one called multiple times via a forking recursion",) $
mkLet go (mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var f `mkVarApps` [x]))) $
mkRLet go2 (mkLams [x] (mkACase (Var go2 `mkApps` [Var go2 `mkApps` [mkLit 0, mkLit 0]]) (go `mkLApps` [0]))) $
go2 `mkLApps` [0,1]
, ("two functions (recursive)",) $
mkRLet go (mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var go `mkVarApps` [x]))) $
mkRLet go2 (mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var go2 `mkVarApps` [x]))) $
Var go `mkApps` [go2 `mkLApps` [0,1], mkLit 0]
  , ("mutual recursion (thunks), called multiple times (both arity 1 would be bad!)",) $
Let (Rec [ (n, mkACase (mkLams [y] $ mkLit 0) (Var d))
, (d, mkACase (mkLams [y] $ mkLit 0) (Var n))]) $
Var n `mkApps` [Var d `mkApps` [Var d `mkApps` [mkLit 0]]]
, ("mutual recursion (functions), but no thunks",) $
Let (Rec [ (go, mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var go2 `mkVarApps` [x])))
, (go2, mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var go `mkVarApps` [x])))]) $
Var go `mkApps` [go2 `mkLApps` [0,1], mkLit 0]
, ("mutual recursion (functions), one boring (d 1 would be bad)",) $
mkLet d (f `mkLApps` [0]) $
Let (Rec [ (go, mkLams [x, y] (Var d `mkApps` [go2 `mkLApps` [1,2]]))
, (go2, mkLams [x] (mkACase (mkLams [y] $ mkLit 0) (Var go `mkVarApps` [x])))]) $
Var d `mkApps` [go2 `mkLApps` [0,1]]
, ("a thunk (non-function-type), called twice, still calls once",) $
mkLet d (f `mkLApps` [0]) $
mkLet x (d `mkLApps` [1]) $
Var f `mkVarApps` [x, x]
, ("a thunk (function type), called multiple times, still calls once",) $
mkLet d (f `mkLApps` [0]) $
mkLet n (Var f `mkApps` [d `mkLApps` [1]]) $
mkLams [x] $ Var n `mkVarApps` [x]
, ("a thunk (non-function-type), in mutual recursion, still calls once (d 1 would be good)",) $
mkLet d (f `mkLApps` [0]) $
Let (Rec [ (x, Var d `mkApps` [go `mkLApps` [1,2]])
, (go, mkLams [x] $ mkACase (mkLams [z] $ Var x) (Var go `mkVarApps` [x]) ) ]) $
Var go `mkApps` [mkLit 0, go `mkLApps` [0,1]]
, ("a thunk (non-function-type), in mutual recursion, causes many calls (d 1 would be bad)",) $
mkLet d (f `mkLApps` [0]) $
Let (Rec [ (x, Var go `mkApps` [go `mkLApps` [1,2], go `mkLApps` [1,2]])
, (go, mkLams [x] $ mkACase (Var d) (Var go `mkVarApps` [x]) ) ]) $
Var go `mkApps` [mkLit 0, go `mkLApps` [0,1]]
, ("a thunk (function type), in mutual recursion, still calls once (d 1 would be good)",) $
mkLet d (f `mkLApps` [0]) $
Let (Rec [ (n, Var go `mkApps` [d `mkLApps` [1]])
, (go, mkLams [x] $ mkACase (Var n) (Var go `mkApps` [Var n `mkVarApps` [x]]) ) ]) $
Var go `mkApps` [mkLit 0, go `mkLApps` [0,1]]
, ("a thunk (non-function-type) co-calls with the body (d 1 would be bad)",) $
mkLet d (f `mkLApps` [0]) $
mkLet x (d `mkLApps` [1]) $
Var d `mkVarApps` [x]
]
main = do
[libdir] <- getArgs
runGhc (Just libdir) $ do
getSessionDynFlags >>= setSessionDynFlags . flip gopt_set Opt_SuppressUniques
dflags <- getSessionDynFlags
liftIO $ forM_ exprs $ \(n,e) -> do
case lintExpr dflags [f,scrutf,scruta] e of
Just msg -> putMsg dflags (msg $$ text "in" <+> text n)
Nothing -> return ()
putMsg dflags (text n <> char ':')
-- liftIO $ putMsg dflags (ppr e)
let e' = callArityRHS e
let bndrs = nonDetEltsUFM (allBoundIds e')
-- It should be OK to use nonDetEltsUFM here, if it becomes a
-- problem we should use DVarSet
-- liftIO $ putMsg dflags (ppr e')
forM_ bndrs $ \v -> putMsg dflags $ nest 4 $ ppr v <+> ppr (idCallArity v)
-- Utilities
mkLApps :: Id -> [Integer] -> CoreExpr
mkLApps v = mkApps (Var v) . map mkLit
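-- An "abstract" case: an if-then-else scrutinising a call to the opaque
-- function scrutf, so the analysis cannot know which branch is taken.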
mkACase = mkIfThenElse (mkVarApps (Var scrutf) [scruta])
mkTestId :: Int -> String -> Type -> Id
mkTestId i s ty = mkSysLocal (mkFastString s) (mkBuiltinUnique i) ty
mkTestIds :: [String] -> [Type] -> [Id]
mkTestIds ns tys = zipWith3 mkTestId [0..] ns tys
mkLet :: Id -> CoreExpr -> CoreExpr -> CoreExpr
mkLet v rhs body = Let (NonRec v rhs) body
mkRLet :: Id -> CoreExpr -> CoreExpr -> CoreExpr
mkRLet v rhs body = Let (Rec [(v, rhs)]) body
mkFun :: Id -> [Id] -> CoreExpr -> CoreExpr -> CoreExpr
mkFun v xs rhs body = mkLet v (mkLams xs rhs) body
mkRFun :: Id -> [Id] -> CoreExpr -> CoreExpr -> CoreExpr
mkRFun v xs rhs body = mkRLet v (mkLams xs rhs) body
mkLit :: Integer -> CoreExpr
mkLit i = Lit (mkLitInteger i intTy)
-- Collects all let-bound IDs
allBoundIds :: CoreExpr -> VarSet
allBoundIds (Let (NonRec v rhs) body) = allBoundIds rhs `unionVarSet` allBoundIds body `extendVarSet` v
allBoundIds (Let (Rec binds) body) =
allBoundIds body `unionVarSet` unionVarSets
[ allBoundIds rhs `extendVarSet` v | (v, rhs) <- binds ]
allBoundIds (App e1 e2) = allBoundIds e1 `unionVarSet` allBoundIds e2
allBoundIds (Case scrut _ _ alts) =
allBoundIds scrut `unionVarSet` unionVarSets
[ allBoundIds e | (_, _ , e) <- alts ]
allBoundIds (Lam _ e) = allBoundIds e
allBoundIds (Tick _ e) = allBoundIds e
allBoundIds (Cast e _) = allBoundIds e
allBoundIds _ = emptyVarSet
|
olsner/ghc
|
testsuite/tests/callarity/unittest/CallArity1.hs
|
Haskell
|
bsd-3-clause
| 9,920
|
{-# LANGUAGE BangPatterns #-}
import Control.Monad
import Data.List
import StackTest
main :: IO ()
main = repl [] $ do
replCommand ":main"
line <- replGetLine
when (line /= "Hello World!")
$ error "Main module didn't load correctly."
|
AndrewRademacher/stack
|
test/integration/tests/module-added-multiple-times/Main.hs
|
Haskell
|
bsd-3-clause
| 256
|
{-
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-}
{-# LANGUAGE PackageImports #-}
{-# LANGUAGE NoImplicitPrelude #-}
module GHC.Pack (module M) where
import "base" GHC.Pack as M
|
Ye-Yong-Chi/codeworld
|
codeworld-base/src/GHC/Pack.hs
|
Haskell
|
apache-2.0
| 731
|
module Main where
import Prelude hiding (lines)
import Control.Monad
import Data.IORef
import Data.Time.Clock
import Graphics.Rendering.OpenGL (($=))
import qualified Graphics.Rendering.OpenGL as GL
import qualified Graphics.UI.GLUT as GLUT
import Game
import Keyboard
import Matrix
fps :: (Fractional a) => a
fps = 1/25
initialWindowSize :: GL.Size
initialWindowSize = GL.Size 640 480
drawOneLine :: GL.Vertex2 Scalar -> GL.Vertex2 Scalar -> IO ()
drawOneLine p1 p2 = GL.renderPrimitive GL.Lines $ do GL.vertex p1; GL.vertex p2
drawLines :: [Line] -> IO ()
drawLines lines = do
GL.color (GL.Color3 1.0 1.0 1.0 :: GL.Color3 GL.GLfloat)
forM_ lines (\ (p0, p1) -> drawOneLine (v p0) (v p1))
where v = uncurry GL.Vertex2
initGL :: IO ()
initGL = do
GL.clearColor $= GL.Color4 0 0 0 0
GL.shadeModel $= GL.Flat
GL.depthFunc $= Nothing
reshape :: GLUT.ReshapeCallback
reshape size@(GL.Size w h) = do
GL.viewport $= (GL.Position 0 0, size)
GL.matrixMode $= GL.Projection
GL.loadIdentity
GL.ortho2D 0 (fromIntegral w) 0 (fromIntegral h)
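-- Advance the game by one logic step, request a redraw, and schedule the next
-- frame so that frames are spaced roughly 'fps' seconds apart.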
frame :: UTCTime ->
IORef Keyboard -> IORef GameState ->
LogicStep ->
GLUT.TimerCallback
frame lastFrameTime keyboardRef stateRef logicStep = do
now <- getCurrentTime
let timeDiff = now `diffUTCTime` lastFrameTime
state <- readIORef stateRef
keyboard <- readIORef keyboardRef
let state' = logicStep timeDiff keyboard state
writeIORef stateRef state'
GLUT.postRedisplay Nothing
let nextFrameTime = fps `addUTCTime` lastFrameTime
waitTime = nextFrameTime `diffUTCTime` now
msWait = truncate (waitTime * 1000)
GLUT.addTimerCallback msWait (frame now keyboardRef stateRef logicStep)
displayCallback :: IORef GameState -> GLUT.DisplayCallback
displayCallback stateRef = do
state <- readIORef stateRef
GL.clear [GL.ColorBuffer]
drawLines $ getLines state
GLUT.swapBuffers
main :: IO ()
main = do
_ <- GLUT.getArgsAndInitialize
GLUT.initialDisplayMode $= [GLUT.DoubleBuffered, GLUT.RGBMode]
GLUT.initialWindowSize $= initialWindowSize
_ <- GLUT.createWindow "purewars"
initGL
GLUT.reshapeCallback $= Just reshape
now <- getCurrentTime
stateRef <- newIORef initialGameState
keyboardRef <- newIORef initialKeyboardState
GLUT.keyboardMouseCallback $= Just (keyboardCallback keyboardRef)
GLUT.displayCallback $= displayCallback stateRef
GLUT.addTimerCallback 1 (frame now keyboardRef stateRef logic)
GLUT.mainLoop
|
sordina/purewars
|
Main.hs
|
Haskell
|
bsd-3-clause
| 2,469
|
module Tuura.Concept (
module Data.Monoid,
module Tuura.Concept.Abstract,
module Tuura.Concept.Circuit,
) where
import Data.Monoid
import Tuura.Concept.Abstract
import Tuura.Concept.Circuit
|
tuura/concepts
|
src/Tuura/Concept.hs
|
Haskell
|
bsd-3-clause
| 207
|
-----------------------------------------------------------------------------
-- |
-- Module : XMonad.Actions.Plane
-- Copyright : (c) Marco Túlio Gontijo e Silva <marcot@riseup.net>,
-- Leonardo Serra <leoserra@minaslivre.org>
-- License : BSD3-style (see LICENSE)
--
-- Maintainer : Marco Túlio Gontijo e Silva <marcot@riseup.net>
-- Stability : unstable
-- Portability : unportable
--
-- This module has functions to navigate through workspaces in a bidimensional
-- manner. It allows the organization of workspaces in lines, and provides
-- functions to move and shift windows in all four directions (left, up, right
-- and down) possible in a surface.
--
-- This functionality was inspired by GNOME (finite) and KDE (infinite)
-- keybindings for workspace navigation, and by "XMonad.Actions.CycleWS" for
-- the idea of applying this approach to XMonad.
-----------------------------------------------------------------------------
module XMonad.Actions.Plane
(
-- * Usage
-- $usage
-- * Data types
Direction (..)
, Limits (..)
, Lines (..)
-- * Key bindings
, planeKeys
-- * Navigating through workspaces
, planeShift
, planeMove
)
where
import Control.Monad
import Data.List
import Data.Map hiding (split)
import Data.Maybe
import XMonad
import XMonad.StackSet hiding (workspaces)
import XMonad.Util.Run
-- $usage
-- You can use this module with the following in your @~\/.xmonad\/xmonad.hs@ file:
--
-- > import XMonad.Actions.Plane
-- >
-- > main = xmonad defaultConfig {keys = myKeys}
-- >
-- > myKeys conf = union (keys defaultConfig conf) $ myNewKeys conf
-- >
-- > myNewKeys (XConfig {modMask = modm}) = planeKeys modm (Lines 3) Finite
--
-- For detailed instructions on editing your key bindings, see
-- "XMonad.Doc.Extending#Editing_key_bindings".
-- | Direction to go in the plane.
data Direction = ToLeft | ToUp | ToRight | ToDown deriving Enum
-- | Defines the behaviour when you're trying to move out of the limits.
data Limits
= Finite -- ^ Ignore the function call, and keep in the same workspace.
| Circular -- ^ Get on the other side, like in the Snake game.
    | Linear   -- ^ The plane comes as a row, so it goes to the next or prev if
-- the workspaces were numbered.
deriving Eq
-- | The number of lines in which the workspaces will be arranged. It's
-- possible to use a number of lines that is not a divisor of the number of
-- workspaces, but the results are better when using a divisor. If it's not a
-- divisor, the last line will have the remaining workspaces.
data Lines
= GConf -- ^ Use @gconftool-2@ to find out the number of lines.
| Lines Int -- ^ Specify the number of lines explicitly.
-- | This is the way most people would like to use this module. It attaches the
-- 'KeyMask' passed as a parameter with 'xK_Left', 'xK_Up', 'xK_Right' and
-- 'xK_Down', associating it with 'planeMove' to the corresponding 'Direction'.
-- It also associates these bindings with 'shiftMask' to 'planeShift'.
planeKeys :: KeyMask -> Lines -> Limits -> Map (KeyMask, KeySym) (X ())
planeKeys modm ln limits =
fromList $
[ ((keyMask, keySym), function ln limits direction)
| (keySym, direction) <- zip [xK_Left .. xK_Down] $ enumFrom ToLeft
, (keyMask, function) <- [(modm, planeMove), (shiftMask .|. modm, planeShift)]
]
-- | Shift a window to the next workspace in 'Direction'. Note that this will
-- also move to the next workspace. It's a good idea to use the same 'Lines'
-- and 'Limits' for all the bindings.
planeShift :: Lines -> Limits -> Direction -> X ()
planeShift = plane shift'
shift' ::
(Eq s, Eq i, Ord a) => i -> StackSet i l a s sd -> StackSet i l a s sd
shift' area = greedyView area . shift area
-- | Move to the next workspace in 'Direction'.
planeMove :: Lines -> Limits -> Direction -> X ()
planeMove = plane greedyView
plane ::
(WorkspaceId -> WindowSet -> WindowSet) -> Lines -> Limits -> Direction ->
X ()
plane function numberLines_ limits direction = do
st <- get
xconf <- ask
numberLines <-
liftIO $
case numberLines_ of
Lines numberLines__ ->
return numberLines__
GConf ->
do
numberLines__ <-
runProcessWithInput gconftool parameters ""
case reads numberLines__ of
[(numberRead, _)] -> return numberRead
_ ->
do
trace $
"XMonad.Actions.Plane: Could not parse the output of " ++ gconftool ++
unwords parameters ++ ": " ++ numberLines__ ++ "; assuming 1."
return 1
let
notBorder :: Bool
notBorder = (replicate 2 (circular_ < currentWS) ++ replicate 2 (circular_ > currentWS)) !! fromEnum direction
circular_ :: Int
circular_ = circular currentWS
circular :: Int -> Int
circular =
[ onLine pred
, onColumn pred
, onLine succ
, onColumn succ
]
!! fromEnum direction
linear :: Int -> Int
linear =
[ onLine pred . onColumn pred
, onColumn pred . onLine pred
, onLine succ . onColumn succ
, onColumn succ . onLine succ
]
!! fromEnum direction
onLine :: (Int -> Int) -> Int -> Int
onLine f currentWS_
| line < areasLine = mod_ columns
| otherwise = mod_ areasColumn
where
line, column :: Int
(line, column) = split currentWS_
mod_ :: Int -> Int
mod_ columns_ = compose line $ mod (f column) columns_
onColumn :: (Int -> Int) -> Int -> Int
onColumn f currentWS_
| column < areasColumn || areasColumn == 0 = mod_ numberLines
| otherwise = mod_ $ pred numberLines
where
line, column :: Int
(line, column) = split currentWS_
mod_ :: Int -> Int
mod_ lines_ = compose (mod (f line) lines_) column
compose :: Int -> Int -> Int
compose line column = line * columns + column
split :: Int -> (Int, Int)
split currentWS_ =
(operation div, operation mod)
where
operation :: (Int -> Int -> Int) -> Int
operation f = f currentWS_ columns
areasLine :: Int
areasLine = div areas columns
areasColumn :: Int
areasColumn = mod areas columns
columns :: Int
columns =
if mod areas numberLines == 0 then preColumns else preColumns + 1
currentWS :: Int
currentWS = fromJust mCurrentWS
preColumns :: Int
preColumns = div areas numberLines
mCurrentWS :: Maybe Int
mCurrentWS = elemIndex (currentTag $ windowset st) areaNames
areas :: Int
areas = length areaNames
run :: (Int -> Int) -> X ()
run f = windows $ function $ areaNames !! f currentWS
areaNames :: [String]
areaNames = workspaces $ config xconf
when (isJust mCurrentWS) $
case limits of
Finite -> when notBorder $ run circular
Circular -> run circular
Linear -> if notBorder then run circular else run linear
gconftool :: String
gconftool = "gconftool-2"
parameters :: [String]
parameters = ["--get", "/apps/panel/applets/workspace_switcher_screen0/prefs/num_rows"]
|
markus1189/xmonad-contrib-710
|
XMonad/Actions/Plane.hs
|
Haskell
|
bsd-3-clause
| 7,811
|
module Main where
import Load
main = testload
|
abuiles/turbinado-blog
|
tmp/dependencies/hs-plugins-1.3.1/testsuite/pdynload/bayley1/prog/Main.hs
|
Haskell
|
bsd-3-clause
| 47
|
{-# LANGUAGE CPP, ForeignFunctionInterface #-}
module Network.Wai.Handler.Warp.SendFile (
sendFile
, readSendFile
, packHeader -- for testing
#ifndef WINDOWS
, positionRead
#endif
) where
import Control.Monad (void, when)
import Data.ByteString (ByteString)
import qualified Data.ByteString as BS
import Network.Socket (Socket)
import Network.Wai.Handler.Warp.Buffer
import Network.Wai.Handler.Warp.Types
#ifdef WINDOWS
import Data.ByteString.Internal (ByteString(..))
import Foreign.ForeignPtr (newForeignPtr_)
import Foreign.Ptr (plusPtr)
import qualified System.IO as IO
#else
# if __GLASGOW_HASKELL__ < 709
import Control.Applicative ((<$>))
# endif
import Control.Exception
import Foreign.C.Error (throwErrno)
import Foreign.C.Types
import Foreign.Ptr (Ptr, castPtr, plusPtr)
import Network.Sendfile
import Network.Wai.Handler.Warp.FdCache (openFile, closeFile)
import System.Posix.Types
#endif
----------------------------------------------------------------
-- | Function to send a file based on sendfile() for Linux\/Mac\/FreeBSD.
-- This makes use of the file descriptor cache.
-- For other OSes, this is identical to 'readSendFile'.
--
-- Since: 3.1.0
sendFile :: Socket -> Buffer -> BufSize -> (ByteString -> IO ()) -> SendFile
#ifdef SENDFILEFD
sendFile s _ _ _ fid off len act hdr = case mfid of
-- settingsFdCacheDuration is 0
Nothing -> sendfileWithHeader s path (PartOfFile off len) act hdr
Just fd -> sendfileFdWithHeader s fd (PartOfFile off len) act hdr
where
mfid = fileIdFd fid
path = fileIdPath fid
#else
sendFile _ = readSendFile
#endif
----------------------------------------------------------------
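-- | Copy the given header chunks into the send buffer starting at offset @n@,
-- flushing the buffer (and running the supplied hook) whenever it fills up,
-- and return how many bytes of the buffer are occupied by header data.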
packHeader :: Buffer -> BufSize -> (ByteString -> IO ())
-> IO () -> [ByteString]
-> Int
-> IO Int
packHeader _ _ _ _ [] n = return n
packHeader buf siz send hook (bs:bss) n
| len < room = do
let dst = buf `plusPtr` n
void $ copy dst bs
packHeader buf siz send hook bss (n + len)
| otherwise = do
let dst = buf `plusPtr` n
(bs1, bs2) = BS.splitAt room bs
void $ copy dst bs1
bufferIO buf siz send
hook
packHeader buf siz send hook (bs2:bss) 0
where
len = BS.length bs
room = siz - n
mini :: Int -> Integer -> Int
mini i n
| fromIntegral i < n = i
| otherwise = fromIntegral n
-- | Function to send a file based on pread()\/send() for Unix.
-- This makes use of the file descriptor cache.
-- For Windows, this is emulated by 'Handle'.
--
-- Since: 3.1.0
#ifdef WINDOWS
readSendFile :: Buffer -> BufSize -> (ByteString -> IO ()) -> SendFile
readSendFile buf siz send fid off0 len0 hook headers = do
hn <- packHeader buf siz send hook headers 0
let room = siz - hn
buf' = buf `plusPtr` hn
IO.withBinaryFile path IO.ReadMode $ \h -> do
IO.hSeek h IO.AbsoluteSeek off0
n <- IO.hGetBufSome h buf' (mini room len0)
bufferIO buf (hn + n) send
hook
let n' = fromIntegral n
fptr <- newForeignPtr_ buf
loop h fptr (len0 - n')
where
path = fileIdPath fid
loop h fptr len
| len <= 0 = return ()
| otherwise = do
n <- IO.hGetBufSome h buf (mini siz len)
when (n /= 0) $ do
let bs = PS fptr 0 n
n' = fromIntegral n
send bs
hook
loop h fptr (len - n')
#else
readSendFile :: Buffer -> BufSize -> (ByteString -> IO ()) -> SendFile
readSendFile buf siz send fid off0 len0 hook headers =
bracket setup teardown $ \fd -> do
hn <- packHeader buf siz send hook headers 0
let room = siz - hn
buf' = buf `plusPtr` hn
n <- positionRead fd buf' (mini room len0) off0
bufferIO buf (hn + n) send
hook
let n' = fromIntegral n
loop fd (len0 - n') (off0 + n')
where
path = fileIdPath fid
setup = case fileIdFd fid of
Just fd -> return fd
Nothing -> openFile path
teardown fd = case fileIdFd fid of
Just _ -> return ()
Nothing -> closeFile fd
loop fd len off
| len <= 0 = return ()
| otherwise = do
n <- positionRead fd buf (mini siz len) off
bufferIO buf n send
let n' = fromIntegral n
hook
loop fd (len - n') (off + n')
positionRead :: Fd -> Buffer -> BufSize -> Integer -> IO Int
positionRead fd buf siz off = do
bytes <- fromIntegral <$> c_pread fd (castPtr buf) (fromIntegral siz) (fromIntegral off)
when (bytes < 0) $ throwErrno "positionRead"
return bytes
foreign import ccall unsafe "pread"
c_pread :: Fd -> Ptr CChar -> ByteCount -> FileOffset -> IO CSsize
#endif
|
utdemir/wai
|
warp/Network/Wai/Handler/Warp/SendFile.hs
|
Haskell
|
mit
| 4,692
|
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
-}
{-# LANGUAGE CPP #-}
module BuildTyCl (
buildSynonymTyCon,
buildFamilyTyCon,
buildAlgTyCon,
buildDataCon,
buildPatSyn,
TcMethInfo, buildClass,
distinctAbstractTyConRhs, totallyAbstractTyConRhs,
mkNewTyConRhs, mkDataTyConRhs,
newImplicitBinder
) where
#include "HsVersions.h"
import IfaceEnv
import FamInstEnv( FamInstEnvs )
import DataCon
import PatSyn
import Var
import VarSet
import BasicTypes
import Name
import MkId
import Class
import TyCon
import Type
import Id
import Coercion
import TcType
import DynFlags
import TcRnMonad
import UniqSupply
import Util
import Outputable
------------------------------------------------------
buildSynonymTyCon :: Name -> [TyVar] -> [Role]
-> Type
-> Kind -- ^ Kind of the RHS
-> TcRnIf m n TyCon
buildSynonymTyCon tc_name tvs roles rhs rhs_kind
= return (mkSynonymTyCon tc_name kind tvs roles rhs)
where kind = mkPiKinds tvs rhs_kind
buildFamilyTyCon :: Name -> [TyVar]
-> FamTyConFlav
-> Kind -- ^ Kind of the RHS
-> TyConParent
-> TcRnIf m n TyCon
buildFamilyTyCon tc_name tvs rhs rhs_kind parent
= return (mkFamilyTyCon tc_name kind tvs rhs parent)
where kind = mkPiKinds tvs rhs_kind
------------------------------------------------------
distinctAbstractTyConRhs, totallyAbstractTyConRhs :: AlgTyConRhs
distinctAbstractTyConRhs = AbstractTyCon True
totallyAbstractTyConRhs = AbstractTyCon False
mkDataTyConRhs :: [DataCon] -> AlgTyConRhs
mkDataTyConRhs cons
= DataTyCon {
data_cons = cons,
is_enum = not (null cons) && all is_enum_con cons
-- See Note [Enumeration types] in TyCon
}
where
is_enum_con con
| (_tvs, theta, arg_tys, _res) <- dataConSig con
= null theta && null arg_tys
mkNewTyConRhs :: Name -> TyCon -> DataCon -> TcRnIf m n AlgTyConRhs
-- ^ Monadic because it makes a Name for the coercion TyCon
-- We pass the Name of the parent TyCon, as well as the TyCon itself,
-- because the latter is part of a knot, whereas the former is not.
mkNewTyConRhs tycon_name tycon con
= do { co_tycon_name <- newImplicitBinder tycon_name mkNewTyCoOcc
; let co_tycon = mkNewTypeCo co_tycon_name tycon etad_tvs etad_roles etad_rhs
; traceIf (text "mkNewTyConRhs" <+> ppr co_tycon)
; return (NewTyCon { data_con = con,
nt_rhs = rhs_ty,
nt_etad_rhs = (etad_tvs, etad_rhs),
nt_co = co_tycon } ) }
-- Coreview looks through newtypes with a Nothing
-- for nt_co, or uses explicit coercions otherwise
where
tvs = tyConTyVars tycon
roles = tyConRoles tycon
inst_con_ty = applyTys (dataConUserType con) (mkTyVarTys tvs)
rhs_ty = ASSERT( isFunTy inst_con_ty ) funArgTy inst_con_ty
-- Instantiate the data con with the
-- type variables from the tycon
-- NB: a newtype DataCon has a type that must look like
-- forall tvs. <arg-ty> -> T tvs
-- Note that we *can't* use dataConInstOrigArgTys here because
-- the newtype arising from class Foo a => Bar a where {}
-- has a single argument (Foo a) that is a *type class*, so
-- dataConInstOrigArgTys returns [].
etad_tvs :: [TyVar] -- Matched lazily, so that mkNewTypeCo can
etad_roles :: [Role] -- return a TyCon without pulling on rhs_ty
etad_rhs :: Type -- See Note [Tricky iface loop] in LoadIface
(etad_tvs, etad_roles, etad_rhs) = eta_reduce (reverse tvs) (reverse roles) rhs_ty
eta_reduce :: [TyVar] -- Reversed
-> [Role] -- also reversed
-> Type -- Rhs type
-> ([TyVar], [Role], Type) -- Eta-reduced version
-- (tyvars in normal order)
eta_reduce (a:as) (_:rs) ty | Just (fun, arg) <- splitAppTy_maybe ty,
Just tv <- getTyVar_maybe arg,
tv == a,
not (a `elemVarSet` tyVarsOfType fun)
= eta_reduce as rs fun
eta_reduce tvs rs ty = (reverse tvs, reverse rs, ty)
------------------------------------------------------
buildDataCon :: FamInstEnvs
-> Name -> Bool
-> [HsBang]
-> [Name] -- Field labels
-> [TyVar] -> [TyVar] -- Univ and ext
-> [(TyVar,Type)] -- Equality spec
-> ThetaType -- Does not include the "stupid theta"
-- or the GADT equalities
-> [Type] -> Type -- Argument and result types
-> TyCon -- Rep tycon
-> TcRnIf m n DataCon
-- A wrapper for DataCon.mkDataCon that
-- a) makes the worker Id
-- b) makes the wrapper Id if necessary, including
-- allocating its unique (hence monadic)
buildDataCon fam_envs src_name declared_infix arg_stricts field_lbls
univ_tvs ex_tvs eq_spec ctxt arg_tys res_ty rep_tycon
= do { wrap_name <- newImplicitBinder src_name mkDataConWrapperOcc
; work_name <- newImplicitBinder src_name mkDataConWorkerOcc
-- This last one takes the name of the data constructor in the source
-- code, which (for Haskell source anyway) will be in the DataName name
-- space, and puts it into the VarName name space
; us <- newUniqueSupply
; dflags <- getDynFlags
; let
stupid_ctxt = mkDataConStupidTheta rep_tycon arg_tys univ_tvs
data_con = mkDataCon src_name declared_infix
arg_stricts field_lbls
univ_tvs ex_tvs eq_spec ctxt
arg_tys res_ty rep_tycon
stupid_ctxt dc_wrk dc_rep
dc_wrk = mkDataConWorkId work_name data_con
dc_rep = initUs_ us (mkDataConRep dflags fam_envs wrap_name data_con)
; return data_con }
-- The stupid context for a data constructor should be limited to
-- the type variables mentioned in the arg_tys
-- ToDo: Or functionally dependent on?
-- This whole stupid theta thing is, well, stupid.
mkDataConStupidTheta :: TyCon -> [Type] -> [TyVar] -> [PredType]
mkDataConStupidTheta tycon arg_tys univ_tvs
| null stupid_theta = [] -- The common case
| otherwise = filter in_arg_tys stupid_theta
where
tc_subst = zipTopTvSubst (tyConTyVars tycon) (mkTyVarTys univ_tvs)
stupid_theta = substTheta tc_subst (tyConStupidTheta tycon)
-- Start by instantiating the master copy of the
-- stupid theta, taken from the TyCon
arg_tyvars = tyVarsOfTypes arg_tys
in_arg_tys pred = not $ isEmptyVarSet $
tyVarsOfType pred `intersectVarSet` arg_tyvars
------------------------------------------------------
buildPatSyn :: Name -> Bool
-> (Id,Bool) -> Maybe (Id, Bool)
-> ([TyVar], ThetaType) -- ^ Univ and req
-> ([TyVar], ThetaType) -- ^ Ex and prov
-> [Type] -- ^ Argument types
-> Type -- ^ Result type
-> PatSyn
buildPatSyn src_name declared_infix matcher@(matcher_id,_) builder
(univ_tvs, req_theta) (ex_tvs, prov_theta) arg_tys pat_ty
= ASSERT((and [ univ_tvs == univ_tvs'
, ex_tvs == ex_tvs'
, pat_ty `eqType` pat_ty'
, prov_theta `eqTypes` prov_theta'
, req_theta `eqTypes` req_theta'
, arg_tys `eqTypes` arg_tys'
]))
mkPatSyn src_name declared_infix
(univ_tvs, req_theta) (ex_tvs, prov_theta)
arg_tys pat_ty
matcher builder
where
((_:univ_tvs'), req_theta', tau) = tcSplitSigmaTy $ idType matcher_id
([pat_ty', cont_sigma, _], _) = tcSplitFunTys tau
(ex_tvs', prov_theta', cont_tau) = tcSplitSigmaTy cont_sigma
(arg_tys', _) = tcSplitFunTys cont_tau
-- ------------------------------------------------------
type TcMethInfo = (Name, DefMethSpec, Type)
-- A temporary intermediate, to communicate between
-- tcClassSigs and buildClass.
buildClass :: Name -> [TyVar] -> [Role] -> ThetaType
-> [FunDep TyVar] -- Functional dependencies
-> [ClassATItem] -- Associated types
-> [TcMethInfo] -- Method info
-> ClassMinimalDef -- Minimal complete definition
-> RecFlag -- Info for type constructor
-> TcRnIf m n Class
buildClass tycon_name tvs roles sc_theta fds at_items sig_stuff mindef tc_isrec
= fixM $ \ rec_clas -> -- Only name generation inside loop
do { traceIf (text "buildClass")
; datacon_name <- newImplicitBinder tycon_name mkClassDataConOcc
-- The class name is the 'parent' for this datacon, not its tycon,
-- because one should import the class to get the binding for
-- the datacon
; op_items <- mapM (mk_op_item rec_clas) sig_stuff
-- Build the selector id and default method id
-- Make selectors for the superclasses
; sc_sel_names <- mapM (newImplicitBinder tycon_name . mkSuperDictSelOcc)
[1..length sc_theta]
; let sc_sel_ids = [ mkDictSelId sc_name rec_clas
| sc_name <- sc_sel_names]
-- We number off the Dict superclass selectors, 1, 2, 3 etc so that we
-- can construct names for the selectors. Thus
-- class (C a, C b) => D a b where ...
-- gives superclass selectors
-- D_sc1, D_sc2
-- (We used to call them D_C, but now we can have two different
-- superclasses both called C!)
; let use_newtype = isSingleton arg_tys
-- Use a newtype if the data constructor
-- (a) has exactly one value field
-- i.e. exactly one operation or superclass taken together
-- (b) that value is of lifted type (which they always are, because
-- we box equality superclasses)
-- See note [Class newtypes and equality predicates]
-- We treat the dictionary superclasses as ordinary arguments.
-- That means that in the case of
-- class C a => D a
-- we don't get a newtype with no arguments!
args = sc_sel_names ++ op_names
op_tys = [ty | (_,_,ty) <- sig_stuff]
op_names = [op | (op,_,_) <- sig_stuff]
arg_tys = sc_theta ++ op_tys
rec_tycon = classTyCon rec_clas
; dict_con <- buildDataCon (panic "buildClass: FamInstEnvs")
datacon_name
False -- Not declared infix
(map (const HsNoBang) args)
[{- No fields -}]
tvs [{- no existentials -}]
[{- No GADT equalities -}]
[{- No theta -}]
arg_tys
(mkTyConApp rec_tycon (mkTyVarTys tvs))
rec_tycon
; rhs <- if use_newtype
then mkNewTyConRhs tycon_name rec_tycon dict_con
else return (mkDataTyConRhs [dict_con])
; let { clas_kind = mkPiKinds tvs constraintKind
; tycon = mkClassTyCon tycon_name clas_kind tvs roles
rhs rec_clas tc_isrec
-- A class can be recursive, and in the case of newtypes
-- this matters. For example
-- class C a where { op :: C b => a -> b -> Int }
-- Because C has only one operation, it is represented by
-- a newtype, and it should be a *recursive* newtype.
-- [If we don't make it a recursive newtype, we'll expand the
-- newtype like a synonym, but that will lead to an infinite
-- type]
; result = mkClass tvs fds
sc_theta sc_sel_ids at_items
op_items mindef tycon
}
; traceIf (text "buildClass" <+> ppr tycon)
; return result }
where
mk_op_item :: Class -> TcMethInfo -> TcRnIf n m ClassOpItem
mk_op_item rec_clas (op_name, dm_spec, _)
= do { dm_info <- case dm_spec of
NoDM -> return NoDefMeth
GenericDM -> do { dm_name <- newImplicitBinder op_name mkGenDefMethodOcc
; return (GenDefMeth dm_name) }
VanillaDM -> do { dm_name <- newImplicitBinder op_name mkDefaultMethodOcc
; return (DefMeth dm_name) }
; return (mkDictSelId op_name rec_clas, dm_info) }
{-
Note [Class newtypes and equality predicates]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
class (a ~ F b) => C a b where
op :: a -> b
We cannot represent this by a newtype, even though it's not
existential, because there are two value fields (the equality
predicate and op). See Trac #2238.
Moreover,
class (a ~ F b) => C a b where {}
Here we can't use a newtype either, even though there is only
one field, because equality predicates are unboxed, and classes
are boxed.
-}
|
forked-upstream-packages-for-ghcjs/ghc
|
compiler/iface/BuildTyCl.hs
|
Haskell
|
bsd-3-clause
| 14,205
|
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="pl-PL">
<title>Passive Scan Rules | ZAP Extension</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Zawartość</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Indeks</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Szukaj</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Ulubione</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset>
|
kingthorin/zap-extensions
|
addOns/pscanrules/src/main/javahelp/org/zaproxy/zap/extension/pscanrules/resources/help_pl_PL/helpset_pl_PL.hs
|
Haskell
|
apache-2.0
| 982
|
module Multi2 where
import Multi1
g = fib fib_gen 46
|
RefactoringTools/HaRe
|
old/testing/introThreshold/Multi2_TokOut.hs
|
Haskell
|
bsd-3-clause
| 55
|
module ListSort () where
import Language.Haskell.Liquid.Prelude
append k [] ys = k:ys
append k (x:xs) ys = x:(append k xs ys)
takeL x [] = []
takeL x (y:ys) = if (y<x) then y:(takeL x ys) else takeL x ys
takeGE x [] = []
takeGE x (y:ys) = if (y>=x) then y:(takeGE x ys) else takeGE x ys
{-@ quicksort :: (Ord a) => xs:[a] -> [a]<{\fld v -> (v < fld)}> @-}
quicksort [] = []
quicksort (x:xs) = append x xsle xsge
where xsle = quicksort (takeL x xs)
xsge = quicksort (takeGE x xs)
chk [] = liquidAssertB True
chk (x1:xs) = case xs of
[] -> liquidAssertB True
x2:xs2 -> liquidAssertB (x1 <= x2) && chk xs
rlist = map choose [1 .. 10]
bar = quicksort rlist
prop0 = chk bar
|
ssaavedra/liquidhaskell
|
tests/neg/ListQSort.hs
|
Haskell
|
bsd-3-clause
| 763
|
{-# LANGUAGE CPP, FlexibleContexts, OverloadedStrings, TupleSections, ScopedTypeVariables, ExtendedDefaultRules, LambdaCase #-}
module Main where
import Control.Applicative
import Control.Monad
import Control.Monad.IO.Class
import Control.Lens (over, _1)
import Control.Concurrent.MVar
import Control.Concurrent
import Data.Char (isLower, toLower, isDigit, isSpace)
import Data.IORef
import qualified Data.HashMap.Strict as HM
import Data.List (partition, isPrefixOf)
import Data.Maybe
import Data.Monoid
import Data.Traversable (sequenceA)
import qualified Data.ByteString as B
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.IO as T
import qualified Data.Text.Lazy as TL
import Data.Time.Clock (getCurrentTime, diffUTCTime)
import Data.Time.Clock.POSIX (utcTimeToPOSIXSeconds)
import Data.Traversable (traverse)
import Filesystem (removeTree, isFile, getWorkingDirectory, createDirectory, copyFile)
import Filesystem.Path ( replaceExtension, basename, directory, extension, addExtension
, filename, addExtensions, dropExtensions)
import Filesystem.Path.CurrentOS (fromText, toText, encodeString)
import Prelude hiding (FilePath)
import qualified Prelude
import Shelly
import System.Directory (doesFileExist, getCurrentDirectory, findExecutable)
import System.Environment (getArgs, getEnv)
import System.Exit
import System.IO hiding (FilePath)
import System.IO.Error
import System.Process ( createProcess, proc, CreateProcess(..), StdStream(..)
, terminateProcess, waitForProcess, readProcessWithExitCode
, ProcessHandle )
import System.Random (randomRIO)
import System.Timeout (timeout)
import Test.Framework
import Test.Framework.Providers.HUnit (testCase)
import Test.HUnit.Base (assertBool, assertFailure, assertEqual, Assertion)
import Test.HUnit.Lang (HUnitFailure(..))
import qualified Data.Yaml as Yaml
import Data.Yaml (FromJSON(..), Value(..), (.:), (.:?), (.!=))
import Data.Default
import Foreign.C.Error (ePIPE, Errno(..))
import Control.DeepSeq
import GHC.IO.Exception(IOErrorType(..), IOException(..))
import qualified Control.Exception as C
import Text.Read (readMaybe)
import Options.Applicative
import Options.Applicative.Types
import Options.Applicative.Internal
import Options.Applicative.Help hiding ((</>), fullDesc)
import qualified Options.Applicative.Help as H
default (Text)
-- | path containing the test cases and data files
getTestDir :: FilePath -> IO FilePath
#ifdef STANDALONE
getTestDir ghcjs = do
(ec, libDir, _) <- readProcessWithExitCode (encodeString ghcjs) ["--print-libdir"] ""
when (ec /= ExitSuccess) (error "could not determine GHCJS installation directory")
let testDir = fromString (trim libDir) </> "test"
e <- doesFileExist (encodeString $ testDir </> "tests.yaml")
when (not e) (error $ "test suite not found in " ++ toStringIgnore testDir ++ ", GHCJS might have been installed without tests")
return testDir
#else
getTestDir _ = do
testDir <- (</> "test") . fromString <$> getCurrentDirectory
e <- doesFileExist (encodeString $ testDir </> "tests.yaml")
when (not e) (error $ "test suite not found in " ++ toStringIgnore testDir)
return testDir
#endif
main :: IO ()
main = shellyE . silently . withTmpDir $ liftIO . setupTests
setupTests :: FilePath -> IO ()
setupTests tmpDir = do
args <- getArgs
(testArgs, leftoverArgs) <-
case runP (runParser AllowOpts optParser args) (prefs idm) of
(Left err, _ctx) -> error ("error parsing arguments: " ++ show err)
(Right (a,l), _ctx) -> return (a,l)
when (taHelp testArgs) $ do
defaultMainWithArgs [] ["--help"] `C.catch` \(e::ExitCode) -> return ()
putStrLn $ renderHelp 80 (parserHelp (prefs idm) optParser)
exitSuccess
let ghcjs = fromString (taWithGhcjs testArgs)
ghcjsPkg = fromString (taWithGhcjsPkg testArgs)
runhaskell = fromString (taWithRunhaskell testArgs)
checkBooted ghcjs
testDir <- maybe (getTestDir ghcjs) (return . fromString) (taWithTests testArgs)
nodePgm <- checkProgram "node" (taWithNode testArgs) ["--help"]
smPgm <- checkProgram "js" (taWithSpiderMonkey testArgs) ["--help"]
jscPgm <- checkProgram "jsc" (taWithJavaScriptCore testArgs) ["--help"]
-- fixme use command line options instead
onlyOptEnv <- getEnvOpt "GHCJS_TEST_ONLYOPT"
onlyUnoptEnv <- getEnvOpt "GHCJS_TEST_ONLYUNOPT"
log <- newIORef []
let noProf = taNoProfiling testArgs
(symbs, base) <- prepareBaseBundle testDir ghcjs []
(profSymbs, profBase) <- if noProf then return (symbs, base)
else prepareBaseBundle testDir ghcjs ["-prof"]
let specFile = testDir </> if taBenchmark testArgs then "benchmarks.yaml" else "tests.yaml"
symbsFile = tmpDir </> "base.symbs"
profSymbsFile = tmpDir </> "base.p_symbs"
disUnopt = onlyOptEnv || taBenchmark testArgs
disOpt = onlyUnoptEnv
opts = TestOpts (onlyOptEnv || taBenchmark testArgs) onlyUnoptEnv noProf (taTravis testArgs) log testDir
symbsFile base
profSymbsFile profBase
ghcjs runhaskell nodePgm smPgm jscPgm
es <- doesFileExist (encodeString specFile)
when (not es) (error $ "test suite not found in " ++ toStringIgnore testDir)
ts <- B.readFile (encodeString specFile) >>=
\x -> case Yaml.decodeEither x of
Left err -> error ("error in test spec file: " ++ toStringIgnore specFile ++ "\n" ++ err)
Right t -> return t
groups <- forM (tsuiGroups ts) $ \(dir, name) ->
testGroup name <$> allTestsIn opts testDir dir
checkRequiredPackages (fromString $ taWithGhcjsPkg testArgs) (tsuiRequiredPackages ts)
B.writeFile (encodeString symbsFile) symbs
B.writeFile (encodeString profSymbsFile) profSymbs
when (disUnopt && disOpt) (putStrLn "nothing to do, optimized and unoptimized disabled")
putStrLn ("running tests in " <> toStringIgnore testDir)
defaultMainWithArgs groups leftoverArgs `C.catch` \(e::ExitCode) -> do
errs <- readIORef log
when (e /= ExitSuccess && not (null errs))
(putStrLn "\nFailed tests:" >> mapM_ putStrLn (reverse errs) >> putStrLn "")
when (e /= ExitSuccess) (C.throwIO e)
checkBooted :: FilePath -> IO ()
checkBooted ghcjs = check `C.catch` \(e::C.SomeException) -> cantRun e
where
cantRun e = do
#ifdef STANDALONE
putStrLn ("Error running GHCJS: " ++ show e)
exitFailure
#else
putStrLn ("Error running GHCJS, skipping tests:\n" ++ show e)
exitSuccess
#endif
check = do
(ec, _, _) <- readProcessWithExitCode (toStringIgnore ghcjs) ["-c", "x.hs"] ""
case ec of
(ExitFailure 87) -> do
putStrLn "GHCJS is not booted, skipping tests"
exitSuccess
_ -> return ()
-- find programs at the start so we don't try to run a nonexistent program over and over again
-- temporary workaround, process-1.2.0.0 leaks when trying to run a nonexistent program
checkProgram :: FilePath -> Maybe String -> [String] -> IO (Maybe FilePath)
checkProgram defName userName testArgs = do
let testProg p as = either (\(e::C.SomeException) -> False) (const True) <$>
C.try (readProcessWithExitCode' "/" p as "")
findExecutable (fromMaybe (encodeString defName) userName) >>= \case
Nothing | Just n <- userName -> error ("could not find program " ++ toStringIgnore defName ++ " at " ++ n)
Nothing -> return Nothing
Just p -> do
testProg p testArgs >>= \case
True -> return (Just $ fromString p)
False -> return Nothing
data TestArgs = TestArgs { taHelp :: Bool
, taWithGhcjs :: String
, taWithGhcjsPkg :: String
, taWithRunhaskell :: String
, taWithNode :: Maybe String
, taWithSpiderMonkey :: Maybe String
, taWithJavaScriptCore :: Maybe String
, taWithTests :: Maybe String
, taNoProfiling :: Bool
, taBenchmark :: Bool
, taTravis :: Bool
} deriving Show
optParser :: Parser TestArgs
optParser = TestArgs <$> switch (long "help" <> help "show help message")
<*> strOption (long "with-ghcjs" <> metavar "PROGRAM" <> value "ghcjs" <> help "ghcjs program to use")
<*> strOption (long "with-ghcjs-pkg" <> metavar "PROGRAM" <> value "ghcjs-pkg" <> help "ghcjs-pkg program to use")
<*> strOption (long "with-runhaskell" <> metavar "PROGRAM" <> value "runhaskell" <> help "runhaskell program to use")
<*> (optional . strOption) (long "with-node" <> metavar "PROGRAM" <> help "node.js program to use")
<*> (optional . strOption) (long "with-spidermonkey" <> metavar "PROGRAM" <> help "SpiderMonkey jsshell program to use")
<*> (optional . strOption) (long "with-javascriptcore" <> metavar "PROGRAM" <> help "JavaScriptCore jsc program to use")
<*> (optional . strOption) (long "with-tests" <> metavar "LOCATION" <> help "location of the test cases")
<*> switch (long "no-profiling" <> help "do not run profiling tests")
<*> switch (long "benchmark" <> help "run benchmarks instead of regression tests")
<*> switch (long "travis" <> help "use settings for running on Travis CI")
-- settings for the test suite
data TestOpts = TestOpts { disableUnopt :: Bool
, disableOpt :: Bool
, noProfiling :: Bool
, travisCI :: Bool
, failedTests :: IORef [String] -- yes it's ugly but i don't know how to get the data from test-framework
, testsuiteLocation :: FilePath
, baseSymbs :: FilePath
, baseJs :: B.ByteString
, profBaseSymbs :: FilePath
, profBaseJs :: B.ByteString
, ghcjsProgram :: FilePath
, runhaskellProgram :: FilePath
, nodeProgram :: Maybe FilePath
, spiderMonkeyProgram :: Maybe FilePath
, javaScriptCoreProgram :: Maybe FilePath
}
-- settings for a single test
data TestSettings =
TestSettings { tsDisableNode :: Bool
, tsDisableSpiderMonkey :: Bool
, tsDisableJavaScriptCore :: Bool
, tsDisableOpt :: Bool
, tsDisableUnopt :: Bool
, tsDisableTravis :: Bool
, tsDisabled :: Bool
, tsProf :: Bool -- ^ use profiling bundle
, tsCompArguments :: [String] -- ^ command line arguments to pass to compiler
, tsArguments :: [String] -- ^ command line arguments to pass to interpreter(node, js)
, tsCopyFiles :: [String] -- ^ copy these files to the dir where the test is run
} deriving (Eq, Show)
instance Default TestSettings where
def = TestSettings False False False False False False False False [] [] []
instance FromJSON TestSettings where
parseJSON (Object o) = TestSettings <$> o .:? "disableNode" .!= False
<*> o .:? "disableSpiderMonkey" .!= False
<*> o .:? "disableJavaScriptCore" .!= False
<*> o .:? "disableOpt" .!= False
<*> o .:? "disableUnopt" .!= False
<*> o .:? "disableTravis" .!= False
<*> o .:? "disabled" .!= False
<*> o .:? "prof" .!= False
<*> o .:? "compArguments" .!= []
<*> o .:? "arguments" .!= []
<*> o .:? "copyFiles" .!= []
parseJSON _ = mempty
-- testsuite description
data TestSuite =
TestSuite { tsuiGroups :: [(FilePath, String)]
, tsuiRequiredPackages :: [Text]
}
instance FromJSON TestSuite where
parseJSON (Object o) = TestSuite <$> (groups =<< o .: "groups") <*> o .: "requiredPackages"
where
groups (Object o) = sequenceA $ map (\(k,v) -> (,) <$> pure (fromText k) <*> parseJSON v) (HM.toList o)
groups _ = mempty
parseJSON _ = mempty
testCaseLog :: TestOpts -> TestName -> Assertion -> Test
testCaseLog opts name assertion = testCase name assertion'
where
assertion' = assertion `C.catch` \e@(HUnitFailure msg) -> do
let errMsg = listToMaybe (filter (not . null) (lines msg))
err = name ++ maybe "" (\x -> " (" ++ trunc (dropName x) ++ ")") errMsg
trunc xs | length xs > 43 = take 40 xs ++ "..."
| otherwise = xs
dropName xs | name `isPrefixOf` xs = drop (length name) xs
| otherwise = xs
modifyIORef (failedTests opts) (err:)
C.throwIO e
{-
run all files in path as stdio tests
tests are:
- .hs or .lhs files
   - that start with a lowercase letter or a digit, or are called Main
-}
allTestsIn :: MonadIO m => TestOpts -> FilePath -> FilePath -> m [Test]
allTestsIn testOpts testDir groupDir = shelly $ do
cd testDir
map (stdioTest testOpts) <$> findWhen (return . isTestFile) groupDir
where
testFirstChar c = isLower c || isDigit c
isTestFile file =
(extension file == Just "hs" || extension file == Just "lhs") &&
((maybe False testFirstChar . listToMaybe . encodeString . basename $ file) ||
(basename file == "Main"))
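    -- e.g. "fib001.hs" and "Main.lhs" are picked up as tests, "Setup.hs" is not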
{-
a stdio test tests two things:
stdout/stderr/exit output must be either:
- the same as filename.out/filename.err/filename.exit (if any exists)
- the same as runhaskell output (otherwise)
  the javascript is run with `node` (V8), `js` (SpiderMonkey) and `jsc` (JavaScriptCore),
  whichever of them are found in $PATH or given on the command line.
-}
data StdioResult = StdioResult { stdioExit :: ExitCode
, stdioOut :: Text
, stdioErr :: Text
}
instance Eq StdioResult where
(StdioResult e1 ou1 er1) == (StdioResult e2 ou2 er2) =
e1 == e2 && (T.strip ou1 == T.strip ou2) && (T.strip er1 == T.strip er2)
outputLimit :: Int
outputLimit = 4096
truncLimit :: Int -> Text -> Text
truncLimit n t | T.length t >= n = T.take n t <> "\n[output truncated]"
| otherwise = t
instance Show StdioResult where
show (StdioResult ex out err) =
"\n>>> exit: " ++ show ex ++ "\n>>> stdout >>>\n" ++
(T.unpack . T.strip) (truncLimit outputLimit out) ++
"\n<<< stderr >>>\n" ++ (T.unpack . T.strip) (truncLimit outputLimit err) ++ "\n<<<\n"
stdioTest :: TestOpts -> FilePath -> Test
stdioTest testOpts file = testCaseLog testOpts (encodeString file) (stdioAssertion testOpts file)
stdioAssertion :: TestOpts -> FilePath -> Assertion
stdioAssertion testOpts file = do
putStrLn ("running test: " ++ encodeString file)
mexpected <- stdioExpected testOpts file
case mexpected of
Nothing -> putStrLn "test disabled"
Just (expected, t) -> do
actual <- runGhcjsResult testOpts file
when (null actual) (putStrLn "warning: no test results")
case t of
Nothing -> return ()
Just ms -> putStrLn ((padTo 35 $ encodeString file) ++ " - " ++ (padTo 35 "runhaskell") ++ " " ++ show ms ++ "ms")
forM_ actual $ \((a,t),d) -> do
assertEqual (encodeString file ++ ": " ++ d) expected a
putStrLn ((padTo 35 $ encodeString file) ++ " - " ++ (padTo 35 d) ++ " " ++ show t ++ "ms")
padTo :: Int -> String -> String
padTo n xs | l < n = xs ++ replicate (n-l) ' '
| otherwise = xs
where l = length xs
stdioExpected :: TestOpts -> FilePath -> IO (Maybe (StdioResult, Maybe Integer))
stdioExpected testOpts file = do
settings <- settingsFor testOpts file
if tsDisabled settings
then return Nothing
else do
xs@[mex,mout,merr] <- mapM (readFilesIfExists.(map (replaceExtension (testsuiteLocation testOpts </> file))))
[["exit"], ["stdout", "out"], ["stderr","err"]]
if any isJust xs
then return . Just $ (StdioResult (fromMaybe ExitSuccess $ readExitCode =<< mex)
(fromMaybe "" mout) (fromMaybe "" merr), Nothing)
else do
mr <- runhaskellResult testOpts settings file
case mr of
Nothing -> assertFailure "cannot run `runhaskell'" >> return undefined
Just (r,t) -> return (Just (r, Just t))
readFileIfExists :: FilePath -> IO (Maybe Text)
readFileIfExists file = do
e <- isFile file
case e of
False -> return Nothing
True -> Just <$> T.readFile (encodeString file)
readFilesIfExists :: [FilePath] -> IO (Maybe Text)
readFilesIfExists [] = return Nothing
readFilesIfExists (x:xs) = do
r <- readFileIfExists x
if (isJust r)
then return r
else readFilesIfExists xs
-- test settings
settingsFor :: TestOpts -> FilePath -> IO TestSettings
settingsFor opts file = do
e <- isFile (testsuiteLocation opts </> settingsFile)
case e of
False -> return def
True -> do
cfg <- B.readFile settingsFile'
case Yaml.decodeEither cfg of
Left err -> errDef
Right t -> return t
where
errDef = do
putStrLn $ "error in test settings: " ++ settingsFile'
putStrLn "running test with default settings"
return def
settingsFile = replaceExtension file "settings"
settingsFile' = encodeString (testsuiteLocation opts </> settingsFile)
runhaskellResult :: TestOpts
-> TestSettings
-> FilePath
-> IO (Maybe (StdioResult, Integer))
runhaskellResult testOpts settings file = do
let args = tsArguments settings
r <- runProcess (testsuiteLocation testOpts </> directory file) (runhaskellProgram testOpts)
([ "-w", encodeString $ filename file] ++ args) ""
return r
extraJsFiles :: FilePath -> IO [String]
extraJsFiles file =
let jsFile = addExtensions (dropExtensions file) ["foreign", "js"]
in do
e <- isFile jsFile
return $ if e then [encodeString jsFile] else []
runGhcjsResult :: TestOpts -> FilePath -> IO [((StdioResult, Integer), String)]
runGhcjsResult opts file = do
settings <- settingsFor opts file
if tsDisabled settings || (tsProf settings && noProfiling opts) || (tsDisableTravis settings && travisCI opts)
then return []
else do
let unopt = if disableUnopt opts || tsDisableUnopt settings then [] else [False]
opt = if disableOpt opts || tsDisableOpt settings then [] else [True]
runs = unopt ++ opt
concat <$> mapM (run settings) runs
where
run settings optimize = do
output <- outputPath
extraFiles <- extraJsFiles file
cd <- getWorkingDirectory
-- compile test
let outputExe = cd </> output </> "a"
outputExe' = outputExe <.> "jsexe"
outputBuild = cd </> output </> "build"
outputRun = outputExe' </> ("all.js"::FilePath)
input = file
desc = ", optimization: " ++ show optimize
opt = if optimize then ["-O2"] else []
extraCompArgs = tsCompArguments settings
prof = tsProf settings
compileOpts = [ "-no-rts", "-no-stats"
, "-o", encodeString outputExe
, "-odir", encodeString outputBuild
, "-hidir", encodeString outputBuild
, "-use-base" , encodeString ((if prof then profBaseSymbs else baseSymbs) opts)
, encodeString (filename input)
] ++ opt ++ extraCompArgs ++ extraFiles
args = tsArguments settings
runTestPgm name disabled getPgm pgmArgs pgmArgs'
| Just p <- getPgm opts, not (disabled settings) =
fmap (,name ++ desc) <$>
runProcess outputExe' p (pgmArgs++encodeString outputRun:pgmArgs'++args) ""
| otherwise = return Nothing
C.bracket (createDirectory False output)
(\_ -> removeTree output) $ \_ -> do -- fixme this doesn't remove the output if the test program is stopped with ctrl-c
createDirectory False outputBuild
e <- liftIO $ runProcess (testsuiteLocation opts </> directory file) (ghcjsProgram opts) compileOpts ""
case e of
Nothing -> assertFailure "cannot find ghcjs"
Just (r,_) -> do
when (stdioExit r /= ExitSuccess) (print r)
assertEqual "compile error" ExitSuccess (stdioExit r)
-- copy data files for test
forM_ (tsCopyFiles settings) $ \cfile ->
let cfile' = fromText (T.pack cfile)
in copyFile (testsuiteLocation opts </> directory file </> cfile') (outputExe' </> cfile')
-- combine files with base bundle from incremental link
[out, lib] <- mapM (B.readFile . (\x -> encodeString (outputExe' </> x)))
["out.js", "lib.js"]
let runMain = "\nh$main(h$mainZCMainzimain);\n"
B.writeFile (encodeString outputRun) $
(if prof then profBaseJs else baseJs) opts <> lib <> out <> runMain
-- run with node.js and SpiderMonkey
nodeResult <- runTestPgm "node" tsDisableNode nodeProgram ["--use_strict"] []
smResult <- runTestPgm "SpiderMonkey" tsDisableSpiderMonkey spiderMonkeyProgram ["--strict"] []
jscResult <- over (traverse . _1 . _1) unmangleJscResult <$>
runTestPgm "JavaScriptCore" tsDisableJavaScriptCore javaScriptCoreProgram [] ["--"]
return $ catMaybes [nodeResult, smResult, jscResult]
    -- jsc prefixes all stderr lines with "--> " and does not let us
-- return a nonzero exit status
unmangleJscResult :: StdioResult -> StdioResult
unmangleJscResult (StdioResult exit out err)
| (x:xs) <- reverse (T.lines err)
, Just code <- T.stripPrefix "--> GHCJS JSC exit status: " x
= StdioResult (parseExit code) out (T.unlines . reverse $ map unmangle xs)
| otherwise = StdioResult exit out (T.unlines . map unmangle . T.lines $ err)
where
unmangle xs = fromMaybe xs (T.stripPrefix "--> " xs)
parseExit x = case reads (T.unpack x) of
[(0,"")] -> ExitSuccess
[(n,"")] -> ExitFailure n
_ -> ExitFailure 999
outputPath :: IO FilePath
outputPath = do
t <- (show :: Integer -> String) . round . (*1000) . utcTimeToPOSIXSeconds <$> getCurrentTime
rnd <- show <$> randomRIO (1000000::Int,9999999)
return . fromString $ "ghcjs_test_" ++ t ++ "_" ++ rnd
-- | returns Nothing if the program cannot be run
runProcess :: MonadIO m => FilePath -> FilePath -> [String] -> String -> m (Maybe (StdioResult, Integer))
runProcess workingDir pgm args input = do
before <- liftIO getCurrentTime
r <- liftIO (C.try $ timeout 180000000 (readProcessWithExitCode' (encodeString workingDir) (encodeString pgm) args input))
case r of
Left (e::C.SomeException) -> return Nothing
Right Nothing -> return (Just (StdioResult ExitSuccess "" "process killed after timeout", 0))
Right (Just (ex, out, err)) -> do
after <- liftIO getCurrentTime
return $
case ex of -- fixme is this the right way to find out that a program does not exist?
(ExitFailure 127) -> Nothing
_ ->
Just ( StdioResult ex (T.pack out) (T.pack err)
, round $ 1000 * (after `diffUTCTime` before)
)
-- modified readProcessWithExitCode with working dir
readProcessWithExitCode'
:: Prelude.FilePath -- ^ Working directory
-> Prelude.FilePath -- ^ Filename of the executable (see 'proc' for details)
-> [String] -- ^ any arguments
-> String -- ^ standard input
-> IO (ExitCode,String,String) -- ^ exitcode, stdout, stderr
readProcessWithExitCode' workingDir cmd args input = do
let cp_opts = (proc cmd args) {
std_in = CreatePipe,
std_out = CreatePipe,
std_err = CreatePipe,
cwd = Just workingDir
}
withCreateProcess cp_opts $
\(Just inh) (Just outh) (Just errh) ph -> do
out <- hGetContents outh
err <- hGetContents errh
-- fork off threads to start consuming stdout & stderr
withForkWait (C.evaluate $ rnf out) $ \waitOut ->
withForkWait (C.evaluate $ rnf err) $ \waitErr -> do
-- now write any input
unless (null input) $
ignoreSigPipe $ hPutStr inh input
-- hClose performs implicit hFlush, and thus may trigger a SIGPIPE
ignoreSigPipe $ hClose inh
-- wait on the output
waitOut
waitErr
hClose outh
hClose errh
-- wait on the process
ex <- waitForProcess ph
return (ex, out, err)
withCreateProcess
:: CreateProcess
-> (Maybe Handle -> Maybe Handle -> Maybe Handle -> ProcessHandle -> IO a)
-> IO a
withCreateProcess c action =
C.bracketOnError (createProcess c) cleanupProcess
(\(m_in, m_out, m_err, ph) -> action m_in m_out m_err ph)
cleanupProcess :: (Maybe Handle, Maybe Handle, Maybe Handle, ProcessHandle)
-> IO ()
cleanupProcess (mb_stdin, mb_stdout, mb_stderr, ph) = do
terminateProcess ph
-- Note, it's important that other threads that might be reading/writing
-- these handles also get killed off, since otherwise they might be holding
-- the handle lock and prevent us from closing, leading to deadlock.
maybe (return ()) (ignoreSigPipe . hClose) mb_stdin
maybe (return ()) hClose mb_stdout
maybe (return ()) hClose mb_stderr
-- terminateProcess does not guarantee that it terminates the process.
-- Indeed on Unix it's SIGTERM, which asks nicely but does not guarantee
-- that it stops. If it doesn't stop, we don't want to hang, so we wait
-- asynchronously using forkIO.
_ <- forkIO (waitForProcess ph >> return ())
return ()
withForkWait :: IO () -> (IO () -> IO a) -> IO a
withForkWait async body = do
waitVar <- newEmptyMVar :: IO (MVar (Either C.SomeException ()))
C.mask $ \restore -> do
tid <- forkIO $ C.try (restore async) >>= putMVar waitVar
let wait = takeMVar waitVar >>= either C.throwIO return
restore (body wait) `C.onException` killThread tid
ignoreSigPipe :: IO () -> IO ()
ignoreSigPipe = C.handle $ \e -> case e of
IOError { ioe_type = ResourceVanished
, ioe_errno = Just ioe }
| Errno ioe == ePIPE -> return ()
_ -> C.throwIO e
-------------------
{-
a mocha test changes to the directory,
runs the action, then runs `mocha'
fails if mocha exits nonzero
-}
mochaTest :: FilePath -> IO a -> IO b -> Test
mochaTest dir pre post = testCase (encodeString dir) $ do    -- minimal sketch of the comment above; assumes `mocha' is on $PATH
  _ <- pre
  (ec, _, _) <- readProcessWithExitCode' (encodeString dir) "mocha" [] ""
  _ <- post
  assertEqual "mocha exit code" ExitSuccess ec
writeFileT :: FilePath -> Text -> IO ()
writeFileT fp t = T.writeFile (encodeString fp) t
readFileT :: FilePath -> IO Text
readFileT fp = T.readFile (encodeString fp)
readExitCode :: Text -> Maybe ExitCode
readExitCode = fmap convert . readMaybe . T.unpack
where
convert 0 = ExitSuccess
convert n = ExitFailure n
checkRequiredPackages :: FilePath -> [Text] -> IO ()
checkRequiredPackages ghcjsPkg requiredPackages = shelly . silently $ do
    installedPackages <- T.words <$> run ghcjsPkg ["list", "--simple-output"]
    forM_ requiredPackages $ \pkg -> do
      when (not $ any ((pkg <> "-") `T.isPrefixOf`) installedPackages) $ do
        echo ("warning: package `" <> pkg <> "' is required by the test suite but is not installed")
prepareBaseBundle :: FilePath -> FilePath -> [Text] -> IO (B.ByteString, B.ByteString)
prepareBaseBundle testDir ghcjs extraArgs = shellyE . silently . sub . withTmpDir $ \tmp -> do
cp (testDir </> "TestLinkBase.hs") tmp
cp (testDir </> "TestLinkMain.hs") tmp
cd tmp
run_ ghcjs $ ["-generate-base", "TestLinkBase", "-o", "base", "TestLinkMain.hs"] ++ extraArgs
cd "base.jsexe"
[symbs, js, lib, rts] <- mapM readBinary
["out.base.symbs", "out.base.js", "lib.base.js", "rts.js"]
return (symbs, rts <> lib <> js)
getEnvMay :: String -> IO (Maybe String)
getEnvMay xs = fmap Just (getEnv xs)
`C.catch` \(_::C.SomeException) -> return Nothing
getEnvOpt :: MonadIO m => String -> m Bool
getEnvOpt xs = liftIO (maybe False ((`notElem` ["0","no"]).map toLower) <$> getEnvMay xs)
trim :: String -> String
trim = let f = dropWhile isSpace . reverse in f . f
shellyE :: Sh a -> IO a
shellyE m = do
r <- newIORef (Left undefined)
let wio r v = liftIO (writeIORef r v)
a <- shelly $ (wio r . Right =<< m) `catch_sh` \(e::C.SomeException) -> wio r (Left e)
readIORef r >>= \case
Left e -> C.throw e
Right a -> return a
toStringIgnore :: FilePath -> String
toStringIgnore = T.unpack . either id id . toText
fromString :: String -> FilePath
fromString = fromText . T.pack
|
beni55/ghcjs
|
test/TestRunner.hs
|
Haskell
|
mit
| 30,626
|
module T11167 where
data SomeException
newtype ContT r m a = ContT {runContT :: (a -> m r) -> m r}
runContT' :: ContT r m a -> (a -> m r) -> m r
runContT' = runContT
catch_ :: IO a -> (SomeException -> IO a) -> IO a
catch_ = undefined
foo :: IO ()
foo = (undefined :: ContT () IO a)
`runContT` (undefined :: a -> IO ())
`catch_` (undefined :: SomeException -> IO ())
foo' :: IO ()
foo' = (undefined :: ContT () IO a)
`runContT'` (undefined :: a -> IO ())
`catch_` (undefined :: SomeException -> IO ())
|
olsner/ghc
|
testsuite/tests/rename/should_compile/T11167.hs
|
Haskell
|
bsd-3-clause
| 542
|
{-# LANGUAGE StaticPointers #-}
module StaticPointers01 where
import GHC.StaticPtr
f0 :: StaticPtr (Int -> Int)
f0 = static g
f1 :: StaticPtr (Bool -> Bool -> Bool)
f1 = static (&&)
f2 :: StaticPtr (Bool -> Bool -> Bool)
f2 = static ((&&) . id)
g :: Int -> Int
g = id
|
ghc-android/ghc
|
testsuite/tests/typecheck/should_compile/TcStaticPointers01.hs
|
Haskell
|
bsd-3-clause
| 274
|
{-# LANGUAGE Rank2Types #-}
-- Tests subsumption for infix operators (in this case (.))
-- Broke GHC 6.4!
-- Now it breaks the impredicativity story
-- (id {a}) . (id {a}) :: a -> a
-- And (forall m. Monad m => m a) /~ IO a
module Main(main) where
foo :: (forall m. Monad m => m a) -> IO a
foo = id . id
main :: IO ()
main = foo (return ())
|
urbanslug/ghc
|
testsuite/tests/typecheck/should_run/tcrun035.hs
|
Haskell
|
bsd-3-clause
| 348
|
import Test.HUnit (Assertion, (@=?), runTestTT, Test(..))
import Control.Monad (void)
import DNA (hammingDistance)
testCase :: String -> Assertion -> Test
testCase label assertion = TestLabel label (TestCase assertion)
main :: IO ()
main = void $ runTestTT $ TestList
[ TestList hammingDistanceTests ]
hammingDistanceTests :: [Test]
hammingDistanceTests =
[ testCase "no difference between empty strands" $
0 @=? hammingDistance "" ""
, testCase "no difference between identical strands" $
0 @=? hammingDistance "GGACTGA" "GGACTGA"
, testCase "complete hamming distance in small strand" $
3 @=? hammingDistance "ACT" "GGA"
, testCase "hamming distance in off by one strand" $
19 @=? hammingDistance
"GGACGGATTCTGACCTGGACTAATTTTGGGG"
"AGGACGGATTCTGACCTGGACTAATTTTGGGG"
, testCase "small hamming distance in middle somewhere" $
1 @=? hammingDistance "GGACG" "GGTCG"
, testCase "larger distance" $
2 @=? hammingDistance "ACCAGGG" "ACTATGG"
, testCase "ignores extra length on other strand when longer" $
3 @=? hammingDistance "AAACTAGGGG" "AGGCTAGCGGTAGGAC"
, testCase "ignores extra length on original strand when longer" $
5 @=? hammingDistance "GACTACGGACAGGGTAGGGAAT" "GACATCGCACACC"
, TestLabel "does not actually shorten original strand" $
TestList $ map TestCase $
[ 1 @=? hammingDistance "AGACAACAGCCAGCCGCCGGATT" "AGGCAA"
, 1 @=? hammingDistance "AGACAACAGCCAGCCGCCGGATT" "AGGCAA"
, 4 @=? hammingDistance
"AGACAACAGCCAGCCGCCGGATT"
"AGACATCTTTCAGCCGCCGGATTAGGCAA"
, 1 @=? hammingDistance "AGACAACAGCCAGCCGCCGGATT" "AGG" ]
]
|
tfausak/exercism-solutions
|
haskell/point-mutations/point-mutations_test.hs
|
Haskell
|
mit
| 1,630
|
module TestSuites.ParserCSVSpec (spec) where
import Test.Hspec.Contrib.HUnit(fromHUnitTest)
import Test.HUnit
import HsPredictor.ParserCSV
import HsPredictor.Types
spec = fromHUnitTest $ TestList [
TestLabel ">>readMatches" test_readMatches
]
test_readMatches = TestCase $ do
let r1 = ["2012.08.24,Dortmund,Bremen,2,3,1.0,2.0,3.0"]
let r2 = ["20.08.24,Dortmund,Bremen,2,3,-1,-1,-1"]
let r3 = ["2012.08.24,Dortmund,Bremen,2,three,-1,-1,-1"]
let r4 = ["2012.08.24,Dortmund,Bremen,2,-1,-1"]
let r5 = ["2012.08.24,Dortmund,Bremen,-1,-1,1.0,2.0,3.0"]
let r6 = ["2012.08.24,Dortmund,Bremen,-1,1,1.0,2.0,3.0"]
let r7 = ["2012.08.24,Dortmund,Bremen,1,-1,-1,-1,-1"]
let r8 = ["2012.08.25,Dortmund,Bremen,1,-1,-1,-1,-1"]
assertEqual "Good input"
[Match 20120824 "Dortmund" "Bremen" 2 3 1 2 3]
(readMatches r1)
assertEqual "Wrong date format"
[]
(readMatches r2)
assertEqual "Wrong result format"
[]
(readMatches r3)
assertEqual "Not complete line"
[]
(readMatches r4)
assertEqual "Upcoming match good input"
[Match 20120824 "Dortmund" "Bremen" (-1) (-1) 1.0 2.0 3.0]
(readMatches r5)
assertEqual "Upcoming match bad1"
[]
(readMatches r6)
assertEqual "Upcoming match bad2"
[]
(readMatches r7)
assertEqual "Sort matches"
(readMatches $ r7++r8)
(readMatches $ r8++r7)
assertEqual "Sort matches"
((readMatches r7) ++ (readMatches r8))
(readMatches $ r8++r7)
|
Taketrung/HsPredictor
|
tests/TestSuites/ParserCSVSpec.hs
|
Haskell
|
mit
| 1,458
|
-- |
-- Module: BigE.TextRenderer.Font
-- Copyright: (c) 2017 Patrik Sandahl
-- Licence: MIT
-- Maintainer: Patrik Sandahl <patrik.sandahl@gmail.com>
-- Stability: experimental
-- Portability: portable
module BigE.TextRenderer.Font
( Font (..)
, fromFile
, enable
, disable
, delete
) where
import qualified BigE.TextRenderer.Parser as Parser
import BigE.TextRenderer.Types (Character (charId), Common, Info,
Page (..))
import BigE.Texture (TextureParameters (..),
defaultParams2D)
import qualified BigE.Texture as Texture
import BigE.Types (Texture, TextureFormat (..),
TextureWrap (..))
import Control.Exception (SomeException, try)
import Control.Monad.IO.Class (MonadIO, liftIO)
import Data.ByteString.Lazy.Char8 (ByteString)
import qualified Data.ByteString.Lazy.Char8 as BS
import Data.HashMap.Strict (HashMap)
import qualified Data.HashMap.Strict as HashMap
import System.FilePath
import Text.Megaparsec (parse)
-- | A loaded font.
data Font = Font
{ info :: !Info
, common :: !Common
, characters :: !(HashMap Int Character)
, fontAtlas :: !Texture
} deriving Show
-- | Read 'Font' data from file and read the referenced texture file.
fromFile :: MonadIO m => FilePath -> m (Either String Font)
fromFile filePath = do
eFnt <- liftIO $ readFontFromFile filePath
case eFnt of
Right fnt -> do
eFontAtlas <- readTextureFromFile filePath fnt
case eFontAtlas of
Right fontAtlas' ->
return $
Right Font
{ info = Parser.info fnt
, common = Parser.common fnt
, characters =
HashMap.fromList $ keyValueList (Parser.characters fnt)
, fontAtlas = fontAtlas'
}
Left err -> return $ Left err
Left err -> return $ Left err
where
keyValueList = map (\char -> (charId char, char))
-- | Enable the font, i.e. bind its texture to the given texture unit.
enable :: MonadIO m => Int -> Font -> m ()
enable unit = Texture.enable2D unit . fontAtlas
-- | Disable the font. I.e. disable the texture at the given texture unit.
disable :: MonadIO m => Int -> m ()
disable = Texture.disable2D
-- | Delete the font. I.e. delete its texture.
delete :: MonadIO m => Font -> m ()
delete = Texture.delete . fontAtlas
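-- | A quick usage sketch (the path below is only a placeholder): load a font
-- and bind its atlas to texture unit 0.
loadAndEnable :: MonadIO m => m (Either String Font)
loadAndEnable = do
    eFont <- fromFile "fonts/example.fnt"
    case eFont of
        Right font -> enable 0 font >> return (Right font)
        Left err   -> return (Left err)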
-- | Get a 'FontFile' from external file.
readFontFromFile :: FilePath -> IO (Either String Parser.FontFile)
readFontFromFile filePath = do
eBs <- tryRead filePath
case eBs of
Right bs ->
case parse Parser.parseFontFile filePath bs of
Right fnt -> return $ Right fnt
Left err -> return $ Left (show err)
Left e -> return $ Left (show e)
where
tryRead :: FilePath -> IO (Either SomeException ByteString)
tryRead = try . BS.readFile
-- | Get a 'Texture' from external file.
readTextureFromFile :: MonadIO m => FilePath -> Parser.FontFile
-> m (Either String Texture)
readTextureFromFile filePath fntFile = do
let fntDir = takeDirectory filePath
texFile = fntDir </> file (Parser.page fntFile)
Texture.fromFile2D texFile
defaultParams2D { format = RGBA8
, wrapS = WrapClampToEdge
, wrapT = WrapClampToEdge
}
|
psandahl/big-engine
|
src/BigE/TextRenderer/Font.hs
|
Haskell
|
mit
| 3,787
|
{-# LANGUAGE FlexibleInstances, MultiParamTypeClasses, UndecidableInstances #-}
{- |
The HList library
(C) 2004-2006, Oleg Kiselyov, Ralf Laemmel, Keean Schupke
A model of label as needed for extensible records.
Record labels are simply type-level naturals.
This models is as simple and as portable as it could be.
-}
module Data.HList.Label1 where
import Data.HList.FakePrelude
import Data.HList.Record (ShowLabel(..))
-- | Labels are type-level naturals
newtype Label x = Label x deriving Show
-- | Public constructors for labels
label :: HNat n => n -> Label n
label = Label
-- | Construct the first label
firstLabel :: Label HZero
firstLabel = label hZero
-- | Construct the next label
nextLabel :: (HNat t) => Label t -> Label (HSucc t)
nextLabel (Label n) = label (hSucc n)
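-- | Illustrative only: successive labels are built by iterating 'nextLabel'.
exampleLabel :: Label (HSucc (HSucc HZero))
exampleLabel = nextLabel (nextLabel firstLabel)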
-- | Equality on labels
instance HEq n n' b
=> HEq (Label n) (Label n') b
-- | Show label
instance Show n => ShowLabel (Label n)
where
showLabel (Label n) = show n
|
bjornbm/HList-classic
|
Data/HList/Label1.hs
|
Haskell
|
mit
| 991
|
{-# LANGUAGE QuasiQuotes #-}
import Here
str :: String
str = [here|test
test
test test |]
main :: IO()
main = do putStrLn str
|
Pnom/haskell-ast-pretty
|
Test/examples/QuasiQuoteLines.hs
|
Haskell
|
mit
| 132
|
module Shipper.Outputs (
startDebugOutput,
startZMQ4Output,
startRedisOutput,
) where
import Shipper.Outputs.Debug
import Shipper.Outputs.ZMQ4
import Shipper.Outputs.Redis
|
christian-marie/pill-bug
|
Shipper/Outputs.hs
|
Haskell
|
mit
| 185
|
module Analysis where
data Criticality = Maximum | Minimum | Inflection
deriving (Eq, Show, Read)
data Extremum p = Extremum
{ exPoint :: p
, exType :: Criticality
} deriving (Eq, Show)
instance Functor Extremum where
fmap f (Extremum p c) = Extremum (f p) c
extremum :: (Fractional t, Ord t) =>
(t -> t) -> (t -> t) -> (t -> t) -> t -> (t, t) -> Extremum (t, t)
extremum f f' f'' e r = Extremum (x, y) t
where x = solve f' f'' e r
y = f x
c = f'' x
t | c > e = Minimum
| c < (-e) = Maximum
| otherwise = Inflection
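-- A small illustrative check: for f x = x*x - 2*x the derivative vanishes at
-- x = 1 and f'' > 0 there, so the classified extremum is the minimum at (1,-1).
exampleMinimum :: Extremum (Double, Double)
exampleMinimum = extremum f f' f'' 1e-9 (0, 3)
  where f x = x*x - 2*x
        f' x = 2*x - 2
        f'' _ = 2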
-- TODO: use bisection to ensure bounds
solve :: (Fractional t, Ord t) =>
(t -> t) -> (t -> t) -> t -> (t, t) -> t
solve f f' e (x0, _) = head . convergedBy e . iterate step $ x0
where step x = x - f x / f' x
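-- A bisection-based alternative, in the spirit of the TODO above (a sketch,
-- not used here): assumes f changes sign on the interval and halves it until
-- its width drops below e.
bisect :: (Fractional t, Ord t) => (t -> t) -> t -> (t, t) -> t
bisect f e (lo, hi)
  | hi - lo < e                     = mid
  | signum (f mid) == signum (f lo) = bisect f e (mid, hi)
  | otherwise                       = bisect f e (lo, mid)
  where mid = (lo + hi) / 2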
dropWhile2 :: (t -> t -> Bool) -> [t] -> [t]
dropWhile2 p xs@(x : xs'@(x' : _)) = if not (p x x') then xs else dropWhile2 p xs'
dropWhile2 _ xs = xs
convergedBy :: (Num t, Ord t) => t -> [t] -> [t]
convergedBy e = dropWhile2 unconverging
where unconverging x x' = abs (x - x') >= e
|
neilmayhew/Tides
|
Analysis.hs
|
Haskell
|
mit
| 1,094
|
module Game.Client where
import Network.Simple.TCP
import Control.Concurrent.MVar
import Control.Applicative
import Game.Position
import Game.Player
import qualified Game.GameWorld as G
import qualified Game.Resources as R
import qualified Game.Unit as U
-- | Client describes the state of the whole client application
data Client = Client {
resources :: R.Resources,
gameworld :: G.GameWorld,
mousePos :: Position,
selectedUnit :: Maybe U.Unit,
scroll :: (Float, Float),
player :: Player,
others :: [Player],
frame :: Int,
box :: MVar G.GameWorld,
socket :: Socket
}
playerNum :: Client -> Int
playerNum client = 1 + length (others client)
myTurn :: Client -> Bool
myTurn client = G.turn (gameworld client) `mod` playerNum client == teamIndex (player client)
myTeam :: Client -> Int
myTeam client = teamIndex (player client)
-- | Creates a new client and loads its resources
newClient :: MVar G.GameWorld -> Socket -> Int -> IO Client
newClient box sock idx = Client
<$> R.loadResources
<*> G.initialGameWorld
<*> return (0, 0)
<*> return Nothing
<*> return (-150, 0)
<*> return (Player "pelaaja" idx)
<*> return [Player "toinen" 3]
<*> return 0
<*> return box
<*> return sock
|
maqqr/psycho-bongo-fight
|
Game/Client.hs
|
Haskell
|
mit
| 1,318
|
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
module Views.Pages.Error (errorView) where
import BasicPrelude
import Text.Blaze.Html5 (Html, toHtml, (!))
import qualified Text.Blaze.Html5 as H
import qualified Text.Blaze.Html5.Attributes as A
import Routes (Route)
import Views.Layout (layoutView)
errorView :: Route -> Text -> Html
errorView currentRoute err =
let pageTitle = "Error"
in layoutView currentRoute pageTitle $ do
    H.div ! A.class_ "page-header" $ H.h1 "Error"
    H.p $ "An unexpected error occurred."
    H.p $ toHtml err
|
nicolashery/example-marvel-haskell
|
Views/Pages/Error.hs
|
Haskell
|
mit
| 572
|
import Control.Applicative
import Data.Char
import Data.Tuple
newtype Parser result = Parser { runParser :: String ->
[(String, result)] }
succeed :: r -> Parser r
succeed v = Parser $ \stream -> [(stream, v)]
instance Functor Parser where
fmap f (Parser pattern) = Parser $ (fmap . fmap . fmap) f pattern
instance Applicative Parser where
pure result = succeed result
Parser pattern_map <*> Parser pattern
= Parser $ \s -> [(u, f a) | (t, f) <- pattern_map s, (u, a) <- pattern t]
satisfy :: (Char -> Bool) -> Parser Char
satisfy p = Parser $ \s -> case s of
[] -> []
a:as
| p a -> [(as, a)]
| otherwise -> []
char :: Char -> Parser Char
char = satisfy . (==)
alpha = satisfy isAlpha
digit = satisfy isDigit
space = satisfy isSpace
charList :: String -> Parser Char
charList = satisfy . (flip elem)
string :: String -> Parser String
string [] = pure []
string (c:cs) = (:) <$> char c <*> string cs
instance Alternative Parser where
empty = Parser $ const []
Parser pattern1 <|> Parser pattern2 = Parser $ liftA2 (++) pattern1 pattern2
end :: Parser ()
end = Parser $ \stream -> [(stream, ()) | null stream]
just :: Parser r -> Parser r
just pattern = const <$> pattern <*> end
(<.>) :: Parser r1 -> Parser r2 -> Parser r2
parser1 <.> parser2 = fmap (flip const) parser1 <*> parser2
(<?>) :: (r -> Bool) -> Parser r -> Parser r
predicate <?> (Parser parser)
= Parser $ \s -> [(t, r) | (t, r) <- parser s, predicate r]
number = (fmap (:) digit) <*> (number <|> succeed [])
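-- Illustrative only: 'runParser' returns every parse as (rest, result), and
-- 'just' keeps the parses that consume the whole input.
exampleParses :: ([(String, String)], [(String, String)])
exampleParses = (runParser number "42", runParser (just number) "42")
-- == ([("","42"),("2","4")], [("","42")])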
|
markstoehr/cs161
|
_site/fls/Lab5_flymake.hs
|
Haskell
|
cc0-1.0
| 1,570
|
{-# LANGUAGE OverloadedStrings #-}
{-
Copyright 2019 The CodeWorld Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-}
import Data.Text (Text)
import RegexShim
import Test.Framework (Test, defaultMain, testGroup)
import Test.HUnit hiding (Test)
import Test.Framework.Providers.HUnit (testCase)
main :: IO ()
main = defaultMain [allTests]
allTests :: Test
allTests = testGroup "RegexShim"
[
testCase "replaces groups" $ testReplacesGroups,
testCase "replaces multiple occurrences" $ testReplacesMultiGroups
]
testReplacesGroups :: Assertion
testReplacesGroups = do
let result = replace "a(b*)c(d*)e" "x\\2y\\1z" "abbbcdddde"
assertEqual "result" "xddddybbbz" result
testReplacesMultiGroups :: Assertion
testReplacesMultiGroups = do
let result = replace "a(b*)c(d*)e" "x\\1y\\2y\\1z" "abbbcddde"
assertEqual "result" "xbbbydddybbbz" result
|
alphalambda/codeworld
|
codeworld-error-sanitizer/test/Main.hs
|
Haskell
|
apache-2.0
| 1,410
|
{-# LANGUAGE TypeOperators #-}
------------------------------------------------------------------------------
module OpenArms.App where
------------------------------------------------------------------------------
import Control.Monad.Reader
import Servant
import Network.Wai
import Control.Monad.Trans.Either
import qualified Data.ByteString.Lazy.Char8 as BL8
------------------------------------------------------------------------------
import OpenArms.Config
import OpenArms.Core
import OpenArms.API
------------------------------------------------------------------------------
-- | Application
app :: AppConfig -> Application
app cfg = serve (Proxy :: Proxy OpenArmsAPI) server
where
server :: Server OpenArmsAPI
server = enter runV handlers
runV :: OpenArms :~> EitherT ServantErr IO
runV = Nat $ bimapEitherT toErr id . flip runReaderT cfg . runOpenArms
    toErr :: String -> ServantErr
    toErr msg = err500 { errBody = BL8.pack msg }  -- minimal mapping (an assumption): any handler error becomes an HTTP 500
handlers :: ServerT OpenArmsAPI OpenArms
handlers = apiEndpoints
|
dmjio/openarms
|
src/OpenArms/App.hs
|
Haskell
|
bsd-2-clause
| 969
|
{-# LANGUAGE DataKinds, RecordWildCards, TypeOperators #-}
module Sprockell where
import CLaSH.Prelude
{-------------------------------------------------------------
| SPROCKELL: Simple PROCessor in hasKELL :-)
|
| j.kuper@utwente.nl
| October 28, 2012
-------------------------------------------------------------}
-- Types
type Word = Signed 16
type RegBankSize = 8
type ProgMemSize = 128
type DataMemSize = 128
type RegBank = Vec RegBankSize Word
type ProgMem = Vec ProgMemSize Assembly
type DataMem = Vec DataMemSize Word
type RegBankAddr = Unsigned 3
type ProgMemAddr = Unsigned 7
type DataMemAddr = Unsigned 7
-- value to be put in Register Bank
data RegValue = RAddr DataMemAddr
| RImm Word
deriving (Eq,Show)
-- value to be put in data memory
data MemValue = MAddr RegBankAddr
| MImm Word
deriving (Eq,Show)
data LdCode = NoLoad
| LdImm
| LdAddr
| LdAlu
deriving (Eq,Show)
data StCode = NoStore
| StImm
| StReg
deriving (Eq,Show)
data SPCode = None
| Up
| Down
deriving (Eq,Show)
data JmpCode = NoJump -- No jump
| UA -- UnConditional - Absolute
| UR -- UnConditional - Relative
| CA -- Conditional - Absolute
| CR -- Conditional - Relative
| Back -- Back from subroutine
deriving (Eq,Show)
data MachCode = MachCode { ldCode :: LdCode -- 0/1: load from dmem to rbank?
, stCode :: StCode -- storeCode
, spCode :: SPCode
, opCode :: OpCode -- opCode
, immvalueR :: Word -- value from Immediate - to regbank
, immvalueS :: Word -- value from Immediate - to store
, fromreg0 :: RegBankAddr -- ibid, first parameter of Compute
, fromreg1 :: RegBankAddr -- ibid, second parameter of Compute
, fromaddr :: DataMemAddr -- address in dmem
, toreg :: RegBankAddr -- ibid, third parameter of Compute
, toaddr :: DataMemAddr -- address in dmem
, wen :: Bool -- enable signal for store
, jmpCode :: JmpCode -- 0/1: indicates a jump
, jumpN :: ProgMemAddr -- which instruction to jump to
}
deriving (Eq,Show)
data OpCode = NoOp | Id | Incr | Decr -- no corresponding functions in prog.language
| Neg | Not -- unary operations
| Add | Sub | Mul | Equal | NEq | Gt | Lt | And | Or -- binary operations
deriving (Eq,Show)
data Assembly = Compute OpCode RegBankAddr RegBankAddr RegBankAddr -- Compute opCode r0 r1 r2: go to "alu",
-- do "opCode" on regs r0, r1, and put result in reg r2
| Jump JmpCode ProgMemAddr -- JumpAbs n: set program counter to n
| Load RegValue RegBankAddr -- Load (Addr a) r : from "memory a" to "regbank r"
-- Load (Imm v) r : put "Int v" in "regbank r"
| Store MemValue DataMemAddr -- Store (Addr r) a: from "regbank r" to "memory a"
-- Store (Imm v) r: put "Int v" in "memory r"
| Push RegBankAddr -- push a value on the stack
| Pop RegBankAddr -- pop a value from the stack
              | EndProg                                  -- end of program, handled by the exec function
| Debug Word
deriving (Eq,Show)
--record type for internal state of processor
data PState = PState { regbank :: RegBank -- register bank
, dmem :: DataMem -- main memory, data memory
, cnd :: Bool -- condition register (whether condition was true)
, pc :: ProgMemAddr
, sp :: DataMemAddr
}
deriving (Eq, Show)
-- move reg0 reg1 = Compute Id reg0 zeroreg reg1
-- wait = Jump UR 0
nullcode = MachCode { ldCode = NoLoad
, stCode = NoStore
, spCode = None
, opCode = NoOp
, immvalueR = 0
, immvalueS = 0
, fromreg0 = 0
, fromreg1 = 0
, fromaddr = 0
, toreg = 0
, toaddr = 0
, wen = False
, jmpCode = NoJump
, jumpN = 0
}
-- {-------------------------------------------------------------
-- | some constants
-- -------------------------------------------------------------}
-- zeroreg = 0 :: RegBankAddr
-- regA = 1 :: RegBankAddr
-- regB = 2 :: RegBankAddr
-- endreg = 3 :: RegBankAddr -- for FOR-loop
-- stepreg = 4 :: RegBankAddr -- ibid
jmpreg = 5 :: RegBankAddr -- for jump instructions
-- pcreg = 7 :: RegBankAddr -- pc is added at the end of the regbank => regbank0
-- sp0 = 20 :: DataMemAddr -- TODO: get sp0 from compiler, add OS
tobit True = 1
tobit False = 0
oddB = (== 1) . lsb
-- wmax :: Word -> Word -> Word
-- wmax w1 w2 = if w1 > w2 then w1 else w2
-- (<~) :: RegBank -> (RegBankAddr, Word) -> RegBank
-- xs <~ (0, x) = xs
-- xs <~ (7, x) = xs
-- xs <~ (i, x) = xs'
-- where
-- addr = i
-- xs' = vreplace xs (fromUnsigned addr) x
-- (<~~) :: DataMem -> (Bool, DataMemAddr, Word) -> DataMem
-- xs <~~ (False, i, x) = xs
-- xs <~~ (True, i , x) = vreplace xs i x
{-------------------------------------------------------------
| The actual Sprockell
-------------------------------------------------------------}
decode :: (ProgMemAddr, DataMemAddr) -> Assembly -> MachCode
decode (pc, sp) instr = case instr of
Compute c i0 i1 i2 -> nullcode {ldCode = LdAlu, opCode = c, fromreg0 = i0, fromreg1=i1, toreg=i2}
Jump jc n -> nullcode {jmpCode = jc, fromreg0 = jmpreg, jumpN = n}
Load (RImm n) j -> nullcode {ldCode = LdImm, immvalueR = n, toreg = j}
Load (RAddr i) j -> nullcode {ldCode = LdAddr, fromaddr = i, toreg = j}
Store (MAddr i) j -> nullcode {stCode = StReg, fromreg0 = i, toaddr = j, wen = True}
Store (MImm n) j -> nullcode {stCode = StImm, immvalueS = n, toaddr = j, wen = True}
Push r -> nullcode {stCode = StReg, fromreg0 = r, toaddr = sp + 1, spCode = Up, wen = True}
Pop r -> nullcode {ldCode = LdAddr, fromaddr = sp, toreg = r, spCode = Down}
EndProg -> nullcode
Debug _ -> nullcode
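-- Illustrative sketch only (not used by topEntity): decoding an immediate load
-- fills in just the load-related fields of nullcode.
exampleDecode :: MachCode
exampleDecode = decode (0, 0) (Load (RImm 42) 3)
-- exampleDecode == nullcode {ldCode = LdImm, immvalueR = 42, toreg = 3}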
alu :: OpCode -> (Word, Word) -> (Word, Bool)
alu opCode (x, y) = (z, cnd)
where
(z, cnd) = (app opCode x y, oddB z)
app opCode = case opCode of
Id -> \x y -> x -- identity function on first argument
Incr -> \x y -> x + 1 -- increment first argument with 1
Decr -> \x y -> x - 1 -- decrement first argument with 1
Neg -> \x y -> -x
Add -> (+) -- goes without saying
Sub -> (-)
Mul -> (*)
Equal -> (tobit.).(==) -- test for equality; result 0 or 1
NEq -> (tobit.).(/=) -- test for inequality
Gt -> (tobit.).(>)
Lt -> (tobit.).(<)
And -> (*)
Or -> \x y -> 0
Not -> \x y -> 1-x
NoOp -> \x y -> 0 -- result will always be 0
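-- Illustrative sketch only: the condition bit reports whether the ALU result
-- is odd, so 3 + 4 sets it.
exampleAlu :: (Word, Bool)
exampleAlu = alu Add (3, 4)   -- (7, True)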
-- load :: RegBank -> LdCode -> RegBankAddr -> (Word, Word, Word) -> RegBank
-- load regbank ldCode toreg (immvalueR, mval, z) = regbank'
-- where
-- v = case ldCode of
-- NoLoad -> 0
-- LdImm -> immvalueR
-- LdAddr -> mval
-- LdAlu -> z
-- regbank' = regbank <~ (toreg, v)
-- store :: DataMem -> StCode -> (Bool, DataMemAddr) -> (Word, Word) -> DataMem
-- store dmem stCode (wen, toaddr) (immvalueS, x) = dmem'
-- where
-- v = case stCode of
-- NoStore -> 0
-- StImm -> immvalueS
-- StReg -> x
-- dmem' = dmem <~~ (wen, toaddr, v)
-- pcUpd :: (JmpCode, Bool) -> (ProgMemAddr, ProgMemAddr, Word) -> ProgMemAddr
-- pcUpd (jmpCode, cnd) (pc, jumpN, x) = pc'
-- where
-- pc' = case jmpCode of
-- NoJump -> inc pc
-- UA -> jumpN
-- UR -> pc + jumpN
-- CA -> if cnd then jumpN else inc pc
-- CR -> if cnd then pc + jumpN else inc pc
-- Back -> bv2u (vdrop d9 (s2bv x))
-- inc i = i + 1
-- spUpd :: SPCode -> DataMemAddr -> DataMemAddr
-- spUpd spCode sp = case spCode of
-- Up -> sp + 1
-- Down -> sp - 1
-- None -> sp
-- -- ======================================================================================
-- -- Putting it all together
-- sprockell :: ProgMem -> (State PState) -> Bit -> (State PState, Bit)
-- sprockell prog (State state) inp = (State (PState {dmem = dmem',regbank = regbank',cnd = cnd',pc = pc',sp = sp'}), outp)
-- where
-- PState{..} = state
-- MachCode{..} = decode (pc,sp) (prog ! (fromUnsigned pc))
-- regbank0 = vreplace regbank (fromUnsigned pcreg) (pc2wrd pc)
-- (x,y) = (regbank0 ! (fromUnsigned fromreg0) , regbank0 ! (fromUnsigned fromreg1))
-- mval = dmem ! fromaddr
-- (z,cnd') = alu opCode (x,y)
-- regbank' = load regbank ldCode toreg (immvalueR,mval,z)
-- dmem' = store dmem stCode (wen,toaddr) (immvalueS,x)
-- pc' = pcUpd (jmpCode,cnd) (pc,jumpN,x)
-- sp' = spUpd spCode sp
-- outp = inp
-- pc2wrd pca = bv2s (u2bv (resizeUnsigned pca :: Unsigned 16))
-- prog1 = vcopy EndProg
-- initstate = PState {
-- regbank = vcopy 0,
-- dmem = vcopy 0,
-- cnd = False,
-- pc = 0,
-- sp = sp0
-- }
-- sprockellL = sprockell prog1 ^^^ initstate
topEntity = alu
|
christiaanb/clash-compiler
|
examples/Sprockell.hs
|
Haskell
|
bsd-2-clause
| 11,221
|
-- 161667
import Data.List(sort, group)
nn = 1500000
-- generate all primitive pythagorean triples w/ Euclid's formula
-- a = m^2 - n^2, b = 2mn, c = m^2 + n^2
-- m - n is odd and m and n are coprime
genTri x m n
| n >= m = genTri x (m+1) 1 -- invalid pair, next m
| n == 1 && p > x = [] -- perimeter too big, done
| p > x = genTri x (m+1) 1 -- perimeter too big, next m
| even (m-n) = genTri x m (n+1) -- m-n must be odd, next n
| gcd m n /= 1 = genTri x m (n+2) -- must be coprime, next n
| otherwise = p : genTri x m (n+2) -- keep, next n
where p = 2*m*(m+n)
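-- illustrative only: the primitive perimeters up to 50 are those of the
-- (3,4,5), (5,12,13) and (8,15,17) triangles
smallPerims :: [Int]
smallPerims = genTri 50 1 1 -- [12,30,40]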
-- generate all pythagorean triples by multiplying by constant factors
-- count how many of each there are and count unique perimeters
countTri p = length $ filter (==1) $ map length $ group $ sort $
concatMap (\x -> takeWhile (p>=) $ map (x*) [1..]) $ genTri p 1 1
main = putStrLn $ show $ countTri nn
|
higgsd/euler
|
hs/75.hs
|
Haskell
|
bsd-2-clause
| 947
|
{-# LANGUAGE TypeSynonymInstances, TypeOperators, FlexibleInstances,
StandaloneDeriving, DeriveFunctor, DeriveFoldable,
DeriveTraversable #-}
-----------------------------------------------------------------------------
-- |
-- Module : Data.Xournal.Select
-- Copyright : (c) 2011, 2012 Ian-Woo Kim
--
-- License : BSD3
-- Maintainer : Ian-Woo Kim <ianwookim@gmail.com>
-- Stability : experimental
-- Portability : GHC
--
-- representing selection of xournal type
--
-----------------------------------------------------------------------------
module Data.Xournal.Select where
import Control.Applicative hiding (empty)
import Control.Compose
import Data.Foldable
import Data.Monoid
import Data.Sequence
import Data.Traversable
-- from this package
import Data.Xournal.Generic
--
import Prelude hiding (zipWith, length, splitAt)
-- |
newtype SeqZipper a = SZ { unSZ :: (a, (Seq a,Seq a)) }
-- |
deriving instance Functor SeqZipper
-- |
deriving instance Foldable SeqZipper
-- |
instance Applicative SeqZipper where
pure = singletonSZ
SZ (f,(f1s,f2s)) <*> SZ (x,(y1s,y2s)) = SZ (f x, (zipWith id f1s y1s, zipWith id f2s y2s))
-- |
deriving instance Traversable SeqZipper
-- |
singletonSZ :: a -> SeqZipper a
singletonSZ x = SZ (x, (empty,empty))
-- |
lengthSZ :: SeqZipper a -> Int
lengthSZ (SZ (_x, (x1s,x2s))) = length x1s + length x2s + 1
-- |
currIndex :: SeqZipper a -> Int
currIndex (SZ (_x, (x1s,_x2s))) = length x1s
-- |
appendGoLast :: SeqZipper a -> a -> SeqZipper a
appendGoLast (SZ (y,(y1s,y2s))) x = SZ (x, ((y1s |> y) >< y2s, empty))
-- |
chopFirst :: SeqZipper a -> Maybe (SeqZipper a)
chopFirst (SZ (y,(y1s,y2s))) =
case viewl y1s of
EmptyL -> case viewl y2s of
EmptyL -> Nothing
z :< zs -> Just (SZ (z,(empty,zs)))
_z :< zs -> Just (SZ (y,(zs,y2s)))
-- |
moveLeft :: SeqZipper a -> Maybe (SeqZipper a)
moveLeft (SZ (x,(x1s,x2s))) =
case viewr x1s of
EmptyR -> Nothing
zs :> z -> Just (SZ (z,(zs,x<|x2s)))
-- |
moveRight :: SeqZipper a -> Maybe (SeqZipper a)
moveRight (SZ (x,(x1s,x2s))) =
case viewl x2s of
EmptyL -> Nothing
z :< zs -> Just (SZ (z,(x1s|>x,zs)))
-- |
moveTo :: Int -> SeqZipper a -> Maybe (SeqZipper a)
moveTo n orig@(SZ (x,(x1s,x2s))) =
let n_x1s = length x1s
n_x2s = length x2s
res | n < 0 || n > n_x1s + n_x2s = Nothing
| n == n_x1s = Just orig
| n < n_x1s = let (x1s1, x1s2) = splitAt n x1s
el :< rm = viewl x1s2
in Just (SZ (el, (x1s1,(rm |> x) >< x2s)))
| n > n_x1s = let (x2s1,x2s2) = splitAt (n-n_x1s-1) x2s
el :< rm = viewl x2s2
in Just (SZ (el, ((x1s |> x) >< x2s1, rm)))
| otherwise = error "error in moveTo"
in res
-- |
goFirst :: SeqZipper a -> SeqZipper a
goFirst orig@(SZ (x,(x1s,x2s))) =
case viewl x1s of
EmptyL -> orig
z :< zs -> SZ (z,(empty, zs `mappend` (x <| x2s)))
-- |
goLast :: SeqZipper a -> SeqZipper a
goLast orig@(SZ (x,(x1s,x2s))) =
case viewr x2s of
EmptyR -> orig
zs :> z -> SZ (z,((x1s |> x) `mappend` zs , empty))
-- |
current :: SeqZipper a -> a
current (SZ (x,(_,_))) = x
-- |
prev :: SeqZipper a -> Maybe a
prev = fmap current . moveLeft
-- |
next :: SeqZipper a -> Maybe a
next = fmap current . moveRight
-- |
replace :: a -> SeqZipper a -> SeqZipper a
replace y (SZ (_x,zs)) = SZ (y,zs)
-- |
deleteCurrent :: SeqZipper a -> Maybe (SeqZipper a)
deleteCurrent (SZ (_,(xs,ys))) =
case viewl ys of
EmptyL -> case viewr xs of
EmptyR -> Nothing
zs :> z -> Just (SZ (z,(zs,ys)))
z :< zs -> Just (SZ (z,(xs,zs)))
-- |
data ZipperSelect a = NoSelect { allelems :: [a] }
| Select { zipper :: (Maybe :. SeqZipper) a }
-- |
deriving instance Functor ZipperSelect
-- |
selectFirst :: ZipperSelect a -> ZipperSelect a
selectFirst (NoSelect []) = NoSelect []
selectFirst (NoSelect lst@(_:_)) = Select . gFromList $ lst
selectFirst (Select (O Nothing)) = NoSelect []
selectFirst (Select (O msz)) = Select . O $ return . goFirst =<< msz
-- |
instance GListable (Maybe :. SeqZipper) where
gFromList [] = O Nothing
gFromList (x:xs) = O (Just (SZ (x, (empty,fromList xs))))
gToList (O Nothing) = []
gToList (O (Just (SZ (x,(xs,ys))))) = toList xs ++ (x : toList ys)
-- |
instance GListable ZipperSelect where
gFromList xs = NoSelect xs
gToList (NoSelect xs) = xs
gToList (Select xs) = gToList xs
-- |
deriving instance Foldable ZipperSelect
-- |
deriving instance Traversable ZipperSelect
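-- Rough usage sketch (added for illustration; `z0` is a hypothetical value,
-- not part of the original module):
--
--   let z0 = gFromList [1,2,3] :: (Maybe :. SeqZipper) Int
--   current <$> unO z0                  == Just 1   -- focus starts at the head
--   current <$> (moveRight =<< unO z0)  == Just 2   -- shift the focus one step right
--   gToList z0                          == [1,2,3]  -- round-trips back to a list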
|
wavewave/xournal-types
|
src/Data/Xournal/Select.hs
|
Haskell
|
bsd-2-clause
| 4,701
|
module Day14_2 where
import Data.List
import Data.List.Split
type DeerInfo = (Int, String, Int, Int, Int)
main :: IO ()
main = do
f <- readFile "input.txt"
let deers = map parse $ map (splitOn " ") (lines f)
distAt = [distanceAtTime d 1 | d <- deers]
score = foldl calc deers [1..2503]
winner = head ((reverse . sort) score)
putStrLn $ "Optimal: " ++ show score
putStrLn (show winner)
putStrLn $ "Dist: " ++ (show distAt)
calc :: [DeerInfo] -> Int -> [DeerInfo]
calc deers sec = foldr incWinner deers winners
where winners = getWinners deers sec
getWinners :: [DeerInfo] -> Int -> [String]
getWinners deers sec = map snd $ takeWhile (\x -> fst x == high) sorted
where mapped = [(distanceAtTime d sec, name) | d@(_, name, _, _, _) <- deers]
sorted = reverse $ sort mapped
high = fst $ head sorted
incWinner :: String -> [DeerInfo] -> [DeerInfo]
incWinner name (x@(points, namex, a, b, c):xs)
| name == namex = (points+1, namex, a, b, c) : xs
| otherwise = (x:incWinner name xs)
parse :: [String] -> DeerInfo
parse [name, _can, _fly, speed, _kms, _for, time, _seconds,
_but, _then, _must, _rest, _for2, rest, _sec2] =
(0, name, read speed, read time, read rest)
distanceAtTime :: DeerInfo -> Int -> Int
distanceAtTime deer@(_, _name, speed, time, rest) dur
= sum $ take dur $ cycle $ activePeriod ++ restingPeriod
where activePeriod = take time (repeat speed)
restingPeriod = take rest (repeat 0)
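-- Worked example (added for illustration; the reindeer data below are the
-- well-known sample values from the puzzle text, not taken from input.txt):
--
--   distanceAtTime (0, "Comet", 14, 10, 127) 1000 == 1120
--   distanceAtTime (0, "Dancer", 16, 11, 162) 1000 == 1056
--
-- i.e. Comet flies at 14 km/s for 10 s, then rests 127 s, ending at 1120 km
-- after 1000 s, while Dancer ends at 1056 km.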
|
ksallberg/adventofcode
|
2015/src/Day14_2.hs
|
Haskell
|
bsd-2-clause
| 1,508
|
module Infinity.Util (
-- * Functions
unlessM, whenM, mkdate, mktime, mkdir, ci,
run,
-- * Types
User, Channel, Command, Nick, Cmds
) where
import Data.List
import System.IO
import System.Exit
import System.Time
import Control.Monad
import System.Process
import System.FilePath
import System.Directory
import Control.Concurrent
import Control.Exception as Ex
type Nick = String
type User = String
type Channel = String
type Command = String
type Cmds = [String]
-- | Runs an executable program, returning output and anything from stderr
run :: FilePath -> [String] -> Maybe String -> IO (String,String)
run file args input = do
(inp,out,err,pid) <- runInteractiveProcess file args Nothing Nothing
case input of
Just i -> hPutStr inp i >> hClose inp
Nothing -> return ()
-- get contents
output <- hGetContents out
errs <- hGetContents err
-- at this point we force their evaluation
-- since hGetContents is lazy.
oMVar <- newEmptyMVar
eMVar <- newEmptyMVar
forkIO (Ex.evaluate (length output) >> putMVar oMVar ())
forkIO (Ex.evaluate (length errs) >> putMVar eMVar ())
takeMVar oMVar >> takeMVar eMVar
-- wait and return
Prelude.catch (waitForProcess pid) $ const (return ExitSuccess)
return (output,errs)
-- | Makes the current date, e.g. 1-8-08
mkdate :: IO String
mkdate = do
time <- (getClockTime >>= toCalendarTime)
let date = ci "-" $ map show $ [(fromEnum $ ctMonth time)+1,ctDay time,ctYear time]
return date
-- | Makes the current time, e.g. '22:14'
mktime :: IO String
mktime = do
time <- (getClockTime >>= toCalendarTime)
let h = show $ ctHour time
m = show $ ctMin time
h' = if (length h) == 1 then "0"++h else h
m' = if (length m) == 1 then "0"++m else m
return (h'++":"++m')
-- | Creates a directory if it doesn't already
-- exist
mkdir :: FilePath -> IO ()
mkdir p = unlessM (doesDirectoryExist p) (createDirectory p)
-- | unless with its first parameter wrapped in
-- IO, i.e. @unlessM b f = b >>= \x -> unless x f@
unlessM :: IO Bool -> IO () -> IO ()
unlessM b f = b >>= \x -> unless x f
-- | when with its first parameter wrapped in
-- IO, i.e. @whenM b f = b >>= \x -> when x f@
whenM :: IO Bool -> IO () -> IO ()
whenM b f = b >>= \x -> when x f
-- | Concatenates a list of Strings,
-- interspersing a separator string
-- between the elements
ci :: String -> [String] -> String
ci x s = concat $ intersperse x s
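-- Usage sketches (added for illustration; not part of the original module):
--
--   ci "-" ["1","8","08"]  == "1-8-08"   -- as produced by mkdate
--   ci ":" ["22","14"]     == "22:14"    -- as produced by mktime
--
--   -- run an external program, capturing stdout and stderr
--   -- (assumes an `echo` binary is available on the PATH):
--   run "echo" ["hello"] Nothing  -- ==> ("hello\n","")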
|
thoughtpolice/infinity
|
src/Infinity/Util.hs
|
Haskell
|
bsd-3-clause
| 2,442
|
module Sword.Daemon where
import Prelude hiding (Either(..))
import qualified Data.Map as Map
import Data.Time (UTCTime, getCurrentTime, diffUTCTime)
import Network.Socket
import System.IO
import Control.Exception
import Control.Concurrent
import Control.Concurrent.Chan
import Control.Monad
import Control.Monad.Fix (fix)
import Sword.Utils
import Sword.World
import Sword.Hero
import Sword.Gui
type Msg = (Int, String, String)
daemonStart :: IO ()
daemonStart = do
timeNow <- getCurrentTime
level <- readFile "src/levels/0A_level.txt"
let (world, worldMap) = loadLevel level timeNow
chan <- newChan
sock <- socket AF_INET Stream 0
setSocketOption sock ReuseAddr 1
bindSocket sock (SockAddrInet 4242 iNADDR_ANY)
-- allow a maximum of 2 outstanding connections
listen sock 2
forkIO (daemonGameLoop chan world worldMap)
newChan <- dupChan chan
forkIO (monsterAlert newChan)
daemonAcceptLoop worldMap sock chan 1
monsterAlert :: Chan Msg -> IO ()
monsterAlert chan = do
threadDelay 500000
writeChan chan (5, "", "")
monsterAlert chan
daemonGameLoop :: Chan Msg -> World -> WorldMap -> IO ()
daemonGameLoop chan world worldMap = do
(nr, input, arg) <- readChan chan
tnow <- getCurrentTime
case (nr, input, arg) of
(0, _, _) ->
daemonGameLoop chan (modifyWorld 0 None tnow worldMap world) worldMap
(5, "", "") -> do
let newxWorld = modifyWorld 0 None tnow worldMap world
writeChan chan (0, show newxWorld ++ "\n", "")
daemonGameLoop chan newxWorld worldMap
(x, "login", name) ->
daemonGameLoop chan (addHero name x tnow world) worldMap
(x, "quit", _) ->
daemonGameLoop chan (removeHero x world) worldMap
(x, input, "") -> do
let newWorld = modifyWorld x (convertInput input) tnow worldMap world
writeChan chan (0, show newWorld ++ "\n", "")
daemonGameLoop chan newWorld worldMap
otherwise -> daemonGameLoop chan world worldMap
daemonAcceptLoop :: WorldMap -> Socket -> Chan Msg -> Int -> IO ()
daemonAcceptLoop wldMap sock chan nr = do
conn <- accept sock
forkIO (runConn conn chan nr wldMap)
daemonAcceptLoop wldMap sock chan $! nr + 1
runConn :: (Socket, SockAddr) -> Chan Msg -> Int -> WorldMap -> IO ()
runConn (sock, _) chan nr worldMap = do
hdl <- socketToHandle sock ReadWriteMode
hSetBuffering hdl LineBuffering
name <- liftM init (hGetLine hdl)
hPrint hdl worldMap
hPrint hdl nr
chan' <- dupChan chan
writeChan chan' (nr, "login", name)
reader <- forkIO $ fix $ \loop -> do
(nr', line, _) <- readChan chan'
when (nr' == 0) $ hPutStrLn hdl line
hFlush hdl
loop
handle (\(SomeException _) -> return ()) $ fix $ \loop -> do
line <- hGetLine hdl
case line of
"quit" -> do
writeChan chan (nr, "quit", "")
hPutStrLn hdl "Bye!"
_ -> do
writeChan chan (nr, line, "")
loop
killThread reader
hClose hdl
loop
loadLevel :: String -> UTCTime -> (World, WorldMap)
loadLevel str tnow = foldl consume (emptyWorld, Map.empty) elems
where lns = lines str
coords = [[(x,y) | x <- [0..]] | y <- [0..]]
elems = concat $ zipWith zip coords lns
consume (wld, wldMap) (c, elt) =
case elt of
'@' -> (wld, Map.insert c Ground wldMap)
'x' -> (wld{monster = Map.insert c emptyMonster{mlastMove = tnow} (monster wld)},
Map.insert c Ground wldMap)
'#' -> (wld, Map.insert c Wall wldMap)
'4' -> (wld, Map.insert c Tree wldMap)
'.' -> (wld, Map.insert c Ground wldMap)
otherwise -> error (show elt ++ " not recognized")
convertInput :: String -> Input
convertInput [] = None
convertInput (char:xs) =
case char of
'k' -> Up
'j' -> Down
'h' -> Left
'l' -> Right
'K' -> FightUp
'J' -> FightDown
'H' -> FightLeft
'L' -> FightRight
'q' -> Quit
otherwise -> None
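-- Illustration of the level format consumed by loadLevel (added for clarity;
-- the real level lives in src/levels/0A_level.txt, whose contents are not
-- shown here):
--
--   ####      '#' -> Wall
--   #.x#      '.' -> Ground, 'x' -> Ground plus a monster spawned at that cell
--   #@4#      '@' -> Ground, '4' -> Tree
--   ####
--
-- Cells are addressed by (x, y) coordinates, generated row by row from (0,0).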
|
kmerz/the_sword
|
src/Sword/Daemon.hs
|
Haskell
|
bsd-3-clause
| 3,924
|
-- Turnir -- a tool for tournament management.
--
-- Author : Ivan N. Veselov
-- Created: 20-Sep-2010
--
-- Copyright (C) 2010 Ivan N. Veselov
--
-- License: BSD3
--
-- | Pretty printing of miscellaneous data structures.
-- Uses wonderful HughesPJ pretty-printing combinator library.
--
module Pretty (
ppTable,
ppRounds
) where
import Text.PrettyPrint.HughesPJ
import Types
--
-- Helper functions
--
t :: String -> Doc
t = text
pp :: Show a => a -> Doc
pp = t . show
dash = char '-'
plus = char '+'
pipe = char '|'
vpunctuate :: Doc -> [Doc] -> [Doc]
vpunctuate p [] = []
vpunctuate p (d:ds) = go d ds
where
go d [] = [d]
go d (e:es) = (d $$ p) : go e es
--
-- Pretty-printing
--
-- | Prints one round information
ppRound :: Int -> [Player] -> Table -> Doc
ppRound r ps table = vcat [ t "Round" <+> int r
, nest o (ppGames games)
, nest o (ppByes byes)
, space
]
where ppGames = vcat . map ppGame
ppGame (Game gid _ p1 p2 res) =
hsep [int gid <> colon, pp p1, dash, pp p2, parens . pp $ res]
ppByes [] = empty
ppByes bs = hsep . (t "bye:" :) . map pp $ bs
games = roundGames r table
byes = roundByes r ps table
o = 2 -- outline of games
-- | Pretty-prints all the rounds, using players list
ppRounds :: [Player] -> Table -> Doc
ppRounds ps table =
vcat . map (\r -> ppRound r ps table) $ [1 .. maxRound table]
-- | Pretty-prints the results cross-table, using the players list
ppTable :: [Player] -> Table -> Doc
ppTable ps t =
table (header : cells)
where
header = "name" : map show [1 .. n]
cells = map (\i -> playerName (ps !! i) : map (\j -> result i j t) [0 .. n - 1]) [0 .. n - 1]
n = length ps
result i j t = pp $ gameByPlayers (ps !! i) (ps !! j) t
pp Nothing = " "
pp (Just x) = show . gameResult $ x
--
-- Pretty-printing textual tables
--
-- | helper functions: enclose a list with pluses or pipes
pluses xs = plus <> xs <> plus
pipes xs = pipe <+> xs <+> pipe
-- | return the widths of the columns; width is currently unbounded
widths :: [[String]] -> [Int]
widths = map (+2) . foldl1 (zipWith max) . map (map length)
-- | makes separator doc
s :: [[String]] -> Doc
s = pluses . hcat . punctuate plus . map (t . flip replicate '-') . widths
-- | makes values doc (row with values, separated by "|")
v :: [Int] -- ^ list which contains width of every column
-> [String] -- ^ list with cells
-> Doc
v ws dt = pipes . hcat . punctuate (t " | ") $ zipWith fill ws dt
-- | pads a string to the given length by adding spaces
-- currently spaces are added to the right; eventually alignment will be configurable
fill :: Int -> String -> Doc
fill n s
| length s < n = t s <> hcat (replicate (n - length s - 2) space)
| otherwise = t (take n s)
-- | pretty prints table with data (first list is header row, next ones are rows with data)
table dt = sepRow $$ (vcat . vpunctuate sepRow $ map (v ws) dt) $$ sepRow
where
sepRow = s dt
ws = widths dt
-- test data
headers = ["ID", "Name", "Price"]
dt1 = ["1", "iPad", "12.00"]
dt2 = ["2", "Cool laptop", "122.00"]
dt3 = ["3", "Yet another cool laptop", "12004.44"]
t1 = [headers, dt1, dt2, dt3]
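-- Worked example (added for illustration): with the test data above,
--
--   widths t1 == [4,25,10]
--
-- i.e. the longest cell in each column (2 for "ID", 23 for
-- "Yet another cool laptop", 8 for "12004.44") plus 2 characters of padding.
-- `table t1` then renders a plain-text table with "+---+" separator rows and
-- "| ... |" value rows using those widths.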
|
sphynx/turnir
|
src/Pretty.hs
|
Haskell
|
bsd-3-clause
| 3,320
|
{-# LANGUAGE OverloadedStrings #-}
module Main where
import Test.Framework
import Test.Framework.Providers.HUnit
import Test.HUnit
import Text.IPv6Addr
main :: IO ()
main = defaultMain $ hUnitTestToTests tests
tests :: Test.HUnit.Test
tests = TestList
[ (~?=) (maybeIPv6Addr ":") Nothing
, (~?=) (maybeIPv6Addr "::") (Just (IPv6Addr "::"))
, (~?=) (maybeIPv6Addr ":::") Nothing
, (~?=) (maybeIPv6Addr "::::") Nothing
, (~?=) (maybeIPv6Addr "::df0::") Nothing
, (~?=) (maybeIPv6Addr "0:0:0:0:0:0:0") Nothing
, (~?=) (maybeIPv6Addr "0:0:0:0:0:0:0:0") (Just (IPv6Addr "::"))
, (~?=) (maybeIPv6Addr "0:0:0:0:0:0:0:0:0") Nothing
, (~?=) (maybeIPv6Addr "1") Nothing
, (~?=) (maybeIPv6Addr "::1") (Just (IPv6Addr "::1"))
, (~?=) (maybeIPv6Addr ":::1") Nothing
, (~?=) (maybeIPv6Addr "::1:") Nothing
, (~?=) (maybeIPv6Addr "0000:0000:0000:0000:0000:0000:0000:0001") (Just (IPv6Addr "::1"))
, (~?=) (maybeIPv6Addr "0:0:0:0:0:0:0:1") (Just (IPv6Addr "::1"))
, (~?=) (maybeIPv6Addr "a") Nothing
, (~?=) (maybeIPv6Addr "ab") Nothing
, (~?=) (maybeIPv6Addr "abc") Nothing
, (~?=) (maybeIPv6Addr "abcd") Nothing
, (~?=) (maybeIPv6Addr "abcd:") Nothing
, (~?=) (maybeIPv6Addr "abcd::") (Just (IPv6Addr "abcd::"))
, (~?=) (maybeIPv6Addr "abcd:::") Nothing
, (~?=) (maybeIPv6Addr "abcde::") Nothing
, (~?=) (maybeIPv6Addr "a::") (Just (IPv6Addr "a::"))
, (~?=) (maybeIPv6Addr "0a::") (Just (IPv6Addr "a::"))
, (~?=) (maybeIPv6Addr "00a::") (Just (IPv6Addr "a::"))
, (~?=) (maybeIPv6Addr "000a::") (Just (IPv6Addr "a::"))
, (~?=) (maybeIPv6Addr "0000a::") Nothing
, (~?=) (maybeIPv6Addr "adb6") Nothing
, (~?=) (maybeIPv6Addr "adb6ce67") Nothing
, (~?=) (maybeIPv6Addr "adb6:ce67") Nothing
, (~?=) (maybeIPv6Addr "adb6::ce67") (Just (IPv6Addr "adb6::ce67"))
, (~?=) (maybeIPv6Addr "::1.2.3.4") (Just (IPv6Addr "::1.2.3.4"))
, (~?=) (maybeIPv6Addr "::ffff:1.2.3.4") (Just (IPv6Addr "::ffff:1.2.3.4"))
, (~?=) (maybeIPv6Addr "::ffff:0:1.2.3.4") (Just (IPv6Addr "::ffff:0:1.2.3.4"))
, (~?=) (maybeIPv6Addr "64:ff9b::1.2.3.4") (Just (IPv6Addr "64:ff9b::1.2.3.4"))
, (~?=) (maybeIPv6Addr "fe80::5efe:1.2.3.4") (Just (IPv6Addr "fe80::5efe:1.2.3.4"))
, (~?=) (maybeIPv6Addr "FE80:CD00:0000:0CDE:1257:0000:211E:729C") (Just (IPv6Addr "fe80:cd00:0:cde:1257:0:211e:729c"))
, (~?=) (maybeIPv6Addr "FE80:CD00:0000:0CDE:1257:0000:211E:729X") Nothing
, (~?=) (maybeIPv6Addr "FE80:CD00:0000:0CDE:1257:0000:211E:729CX") Nothing
, (~?=) (maybeIPv6Addr "FE80:CD00:0000:0CDE:0000:211E:729C") Nothing
, (~?=) (maybeIPv6Addr "FE80:CD00:0000:0CDE:FFFF:1257:0000:211E:729C") Nothing
, (~?=) (maybeIPv6Addr "1111:2222:3333:4444:5555:6666:7777:8888") (Just (IPv6Addr "1111:2222:3333:4444:5555:6666:7777:8888"))
, (~?=) (maybeIPv6Addr ":1111:2222:3333:4444:5555:6666:7777:8888") Nothing
, (~?=) (maybeIPv6Addr "1111:2222:3333:4444:5555:6666:7777:8888:") Nothing
, (~?=) (maybeIPv6Addr "1111::3333:4444:5555:6666::8888") Nothing
, (~?=) (maybeIPv6Addr "AAAA:BBBB:CCCC:DDDD:EEEE:FFFF:0000:0000") (Just (IPv6Addr "aaaa:bbbb:cccc:dddd:eeee:ffff::"))
, (~?=) (maybeIPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:0001") (Just (IPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:1"))
, (~?=) (maybeIPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:001") (Just (IPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:1"))
, (~?=) (maybeIPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:01") (Just (IPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:1"))
, (~?=) (maybeIPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:1") (Just (IPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:eeee:1"))
, (~?=) (maybeIPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd::1") (Just (IPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:0:1"))
, (~?=) (maybeIPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:0:1") (Just (IPv6Addr "2001:db8:aaaa:bbbb:cccc:dddd:0:1"))
, (~?=) (maybeIPv6Addr "2001:db8:0:0:1:0:0:1") (Just (IPv6Addr "2001:db8::1:0:0:1"))
, (~?=) (maybeIPv6Addr "2001:db8:0:1:0:0:0:1") (Just (IPv6Addr "2001:db8:0:1::1"))
, (~?=) (maybeIPv6Addr "2001:DB8:0:0:0::1") (Just (IPv6Addr "2001:db8::1"))
, (~?=) (maybeIPv6Addr "2001:0DB8:0:0::1") (Just (IPv6Addr "2001:db8::1"))
, (~?=) (maybeIPv6Addr "2001:0dB8:0::1") (Just (IPv6Addr "2001:db8::1"))
, (~?=) (maybeIPv6Addr "2001:db8::1") (Just (IPv6Addr "2001:db8::1"))
, (~?=) (maybeIPv6Addr "2001:db8:0:1::1") (Just (IPv6Addr "2001:db8:0:1::1"))
, (~?=) (maybeIPv6Addr "2001:0db8:0:1:0:0:0:1") (Just (IPv6Addr "2001:db8:0:1::1"))
, (~?=) (maybeIPv6Addr "2001:DB8::1:1:1:1:1") (Just (IPv6Addr "2001:db8:0:1:1:1:1:1"))
, (~?=) (maybeIPv6Addr "2001:DB8::1:1:0:1:1") (Just (IPv6Addr "2001:db8:0:1:1:0:1:1"))
, (~?=) (maybeIPv6Addr "fe80") Nothing
, (~?=) (maybeIPv6Addr "fe80::") (Just (IPv6Addr "fe80::"))
, (~?=) (maybeIPv6Addr "0:0:0:0:0:ffff:192.0.2.1") (Just (IPv6Addr "::ffff:192.0.2.1"))
, (~?=) (maybeIPv6Addr "::192.0.2.1") (Just (IPv6Addr "::192.0.2.1"))
, (~?=) (maybeIPv6Addr "192.0.2.1::") Nothing
, (~?=) (maybeIPv6Addr "::ffff:192.0.2.1") (Just (IPv6Addr "::ffff:192.0.2.1"))
, (~?=) (maybeIPv6Addr "fe80:0:0:0:0:0:0:0") (Just (IPv6Addr "fe80::"))
, (~?=) (maybeIPv6Addr "fe80:0000:0000:0000:0000:0000:0000:0000") (Just (IPv6Addr "fe80::"))
, (~?=) (maybeIPv6Addr "2001:db8:Bad:0:0::0:1") (Just (IPv6Addr "2001:db8:bad::1"))
, (~?=) (maybeIPv6Addr "2001:0:0:1:b:0:0:A") (Just (IPv6Addr "2001::1:b:0:0:a"))
, (~?=) (maybeIPv6Addr "2001:0:0:1:000B:0:0:0") (Just (IPv6Addr "2001:0:0:1:b::"))
, (~?=) (maybeIPv6Addr "2001:0DB8:85A3:0000:0000:8A2E:0370:7334") (Just (IPv6Addr "2001:db8:85a3::8a2e:370:7334"))
, (~?=) (maybePureIPv6Addr "0:0:0:0:0:ffff:192.0.2.1") (Just (IPv6Addr "::ffff:c000:201"))
, (~?=) (maybePureIPv6Addr "::ffff:192.0.2.1") (Just (IPv6Addr "::ffff:c000:201"))
, (~?=) (maybeFullIPv6Addr "::") (Just (IPv6Addr "0000:0000:0000:0000:0000:0000:0000:0000"))
, (~?=) (maybeFullIPv6Addr "0:0:0:0:0:0:0:0") (Just (IPv6Addr "0000:0000:0000:0000:0000:0000:0000:0000"))
, (~?=) (maybeFullIPv6Addr "::1") (Just (IPv6Addr "0000:0000:0000:0000:0000:0000:0000:0001"))
, (~?=) (maybeFullIPv6Addr "2001:db8::1") (Just (IPv6Addr "2001:0db8:0000:0000:0000:0000:0000:0001"))
, (~?=) (maybeFullIPv6Addr "a:bb:ccc:dddd:1cDc::1") (Just (IPv6Addr "000a:00bb:0ccc:dddd:1cdc:0000:0000:0001"))
, (~?=) (maybeFullIPv6Addr "FE80::0202:B3FF:FE1E:8329") (Just (IPv6Addr "fe80:0000:0000:0000:0202:b3ff:fe1e:8329"))
, (~?=) (maybeFullIPv6Addr "aDb6::CE67") (Just (IPv6Addr "adb6:0000:0000:0000:0000:0000:0000:ce67"))
, (~?=) (toIP6ARPA (IPv6Addr "::1")) "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.IP6.ARPA."
, (~?=) (toIP6ARPA (IPv6Addr "2b02:0b08:0:7::0001")) "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.7.0.0.0.0.0.0.0.8.0.b.0.2.0.b.2.IP6.ARPA."
, (~?=) (toIP6ARPA (IPv6Addr "2b02:b08:0:7::1")) "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.7.0.0.0.0.0.0.0.8.0.b.0.2.0.b.2.IP6.ARPA."
, (~?=) (toIP6ARPA (IPv6Addr "fdda:5cc1:23:4::1f")) "f.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.4.0.0.0.3.2.0.0.1.c.c.5.a.d.d.f.IP6.ARPA."
, (~?=) (toIP6ARPA (IPv6Addr "2001:db8::")) "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.IP6.ARPA."
, (~?=) (toIP6ARPA (IPv6Addr "4321:0:1:2:3:4:567:89ab")) "b.a.9.8.7.6.5.0.4.0.0.0.3.0.0.0.2.0.0.0.1.0.0.0.0.0.0.0.1.2.3.4.IP6.ARPA."
, (~?=) (toUNC (IPv6Addr "2001:0DB8:002a:1005:230:48ff:FE73:989d")) "2001-db8-2a-1005-230-48ff-fe73-989d.ipv6-literal.net"
, (~?=) (toUNC (IPv6Addr "2001:0db8:85a3:0000:0000:8a2e:0370:7334")) "2001-db8-85a3--8a2e-370-7334.ipv6-literal.net"
, (~?=) (macAddrToIPv6AddrTokens "fa:1d:58:cc:95:16") (Just [SixteenBit "fa1d", Colon, SixteenBit "58cc", Colon, SixteenBit "9516"])
]
|
MichelBoucey/IPv6Addr
|
tests/Main.hs
|
Haskell
|
bsd-3-clause
| 7,625
|
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE RecordWildCards #-}
module Refact.Fixity (applyFixities) where
import SrcLoc
import Refact.Utils
import BasicTypes (Fixity(..), defaultFixity, compareFixity, negateFixity, FixityDirection(..))
import HsExpr
import RdrName
import OccName
import PlaceHolder
import Data.Generics hiding (Fixity)
import Data.Maybe
import Language.Haskell.GHC.ExactPrint.Types
import Control.Monad.State
import qualified Data.Map as Map
import Data.Tuple
-- | Rearrange infix expressions to account for fixity.
-- The set of fixities is wired in and includes all fixities in base.
applyFixities :: Anns -> Module -> (Anns, Module)
applyFixities as m = swap $ runState (everywhereM (mkM expFix) m) as
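-- Rough illustration (added; the exact tree shape handed to us from upstream
-- is an assumption here): if `1 + 2 * 3` arrives associated as `(1 + 2) * 3`,
-- mkOpAppRn compares the fixities of `+` (infixl 6) and `*` (infixl 7),
-- re-associates the tree to `1 + (2 * 3)`, and moveDelta shifts the
-- exact-print annotations so the printed source text is unchanged.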
expFix :: LHsExpr RdrName -> M (LHsExpr RdrName)
expFix (L loc (OpApp l op _ r)) = do
newExpr <- mkOpAppRn baseFixities l op (findFixity baseFixities op) r
return (L loc newExpr)
expFix e = return e
getIdent :: Expr -> String
getIdent (unLoc -> HsVar n) = occNameString . rdrNameOcc $ n
getIdent _ = error "Must be HsVar"
moveDelta :: AnnKey -> AnnKey -> M ()
moveDelta old new = do
a@Ann{..} <- gets (fromMaybe annNone . Map.lookup old)
modify (Map.insert new (annNone { annEntryDelta = annEntryDelta, annPriorComments = annPriorComments }))
modify (Map.insert old (a { annEntryDelta = DP (0,0), annPriorComments = []}))
---------------------------
-- Modified from GHC Renamer
mkOpAppRn ::
[(String, Fixity)]
  -> LHsExpr RdrName                  -- Left operand; already rearranged
-> LHsExpr RdrName -> Fixity -- Operator and fixity
-> LHsExpr RdrName -- Right operand (not an OpApp, but might
-- be a NegApp)
-> M (HsExpr RdrName)
-- (e11 `op1` e12) `op2` e2
mkOpAppRn fs e1@(L _ (OpApp e11 op1 p e12)) op2 fix2 e2
| nofix_error
= return $ OpApp e1 op2 p e2
| associate_right = do
new_e <- L loc' <$> mkOpAppRn fs e12 op2 fix2 e2
moveDelta (mkAnnKey e12) (mkAnnKey new_e)
return $ OpApp e11 op1 p new_e
where
fix1 = findFixity fs op1
loc'= combineLocs e12 e2
(nofix_error, associate_right) = compareFixity fix1 fix2
---------------------------
-- (- neg_arg) `op` e2
mkOpAppRn fs e1@(L _ (NegApp neg_arg neg_name)) op2 fix2 e2
| nofix_error
= return $ OpApp e1 op2 PlaceHolder e2
| associate_right
= do
new_e <- L loc' <$> mkOpAppRn fs neg_arg op2 fix2 e2
moveDelta (mkAnnKey neg_arg) (mkAnnKey new_e)
return (NegApp new_e neg_name)
where
loc' = combineLocs neg_arg e2
(nofix_error, associate_right) = compareFixity negateFixity fix2
---------------------------
-- e1 `op` - neg_arg
mkOpAppRn _ e1 op1 fix1 e2@(L _ (NegApp _ _)) -- NegApp can occur on the right
| not associate_right -- We *want* right association
= return $ OpApp e1 op1 PlaceHolder e2
where
(_, associate_right) = compareFixity fix1 negateFixity
---------------------------
-- Default case
mkOpAppRn _ e1 op _ e2 -- Default case, no rearrangement
= return $ OpApp e1 op PlaceHolder e2
findFixity :: [(String, Fixity)] -> Expr -> Fixity
findFixity fs r = askFix fs (getIdent r)
askFix :: [(String, Fixity)] -> String -> Fixity
askFix xs = \k -> lookupWithDefault defaultFixity k xs
where
lookupWithDefault def k mp1 = fromMaybe def $ lookup k mp1
-- | All fixities defined in the Prelude.
preludeFixities :: [(String, Fixity)]
preludeFixities = concat
[infixr_ 9 ["."]
,infixl_ 9 ["!!"]
,infixr_ 8 ["^","^^","**"]
,infixl_ 7 ["*","/","quot","rem","div","mod",":%","%"]
,infixl_ 6 ["+","-"]
,infixr_ 5 [":","++"]
,infix_ 4 ["==","/=","<","<=",">=",">","elem","notElem"]
,infixr_ 3 ["&&"]
,infixr_ 2 ["||"]
,infixl_ 1 [">>",">>="]
,infixr_ 1 ["=<<"]
,infixr_ 0 ["$","$!","seq"]
]
-- | All fixities defined in the base package.
--
-- Note that the @+++@ operator appears in both Control.Arrow and
-- Text.ParserCombinators.ReadP. The listed precedence for @+++@ in
-- this list is that of Control.Arrow.
baseFixities :: [(String, Fixity)]
baseFixities = preludeFixities ++ concat
[infixl_ 9 ["!","//","!:"]
,infixl_ 8 ["shift","rotate","shiftL","shiftR","rotateL","rotateR"]
,infixl_ 7 [".&."]
,infixl_ 6 ["xor"]
,infix_ 6 [":+"]
,infixl_ 5 [".|."]
,infixr_ 5 ["+:+","<++","<+>"] -- fixity conflict for +++ between ReadP and Arrow
,infix_ 5 ["\\\\"]
,infixl_ 4 ["<$>","<$","<*>","<*","*>","<**>"]
,infix_ 4 ["elemP","notElemP"]
,infixl_ 3 ["<|>"]
,infixr_ 3 ["&&&","***"]
,infixr_ 2 ["+++","|||"]
,infixr_ 1 ["<=<",">=>",">>>","<<<","^<<","<<^","^>>",">>^"]
,infixl_ 0 ["on"]
,infixr_ 0 ["par","pseq"]
]
infixr_, infixl_, infix_ :: Int -> [String] -> [(String,Fixity)]
infixr_ = fixity InfixR
infixl_ = fixity InfixL
infix_ = fixity InfixN
-- Internal: helper function for the above definitions.
fixity :: FixityDirection -> Int -> [String] -> [(String, Fixity)]
fixity a p = map (,Fixity p a)
|
bitemyapp/apply-refact
|
src/Refact/Fixity.hs
|
Haskell
|
bsd-3-clause
| 5,164
|
{-
Copyright (c) 2015, Joshua Brot
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Joshua Brot nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-}
import Distribution.Simple
main = defaultMain
|
Pamelloes/AAGenAlg
|
Setup.hs
|
Haskell
|
bsd-3-clause
| 1,572
|
module IptAdmin.AddChainPage where
import Control.Monad.Error
import Happstack.Server.SimpleHTTP
import IptAdmin.EditChainForm.Parse
import IptAdmin.EditChainForm.Render
import IptAdmin.Render
import IptAdmin.System
import IptAdmin.Types
import IptAdmin.Utils
import Iptables
import Iptables.Types
import Text.ParserCombinators.Parsec.Prim hiding (State (..))
import Text.Blaze.Renderer.Pretty (renderHtml)
pageHandlers :: IptAdmin Response
pageHandlers = msum [ methodSP GET pageHandlerGet
, methodSP POST pageHandlerPost
]
pageHandlerGet :: IptAdmin Response
pageHandlerGet = do
tableName <- getInputNonEmptyString "table"
return $ buildResponse $ renderHtml $ do
editChainForm (tableName, "") "" Nothing
pageHandlerPost :: IptAdmin Response
pageHandlerPost = do
tableName <- getInputNonEmptyString "table"
newChainName <- getInputString "newChainName"
let newChainNameE = parse parseChainName "chain name" newChainName
case newChainNameE of
Left e -> return $ buildResponse $ renderHtml $ do
editChainForm (tableName, "") newChainName $ Just $ "Parameter error: " ++ show e
Right newChainName' -> do
iptables <- getIptables
table <- case tableName of
"filter" -> return $ tFilter iptables
"nat" -> return $ tNat iptables
"mangle" -> return $ tMangle iptables
"raw" -> return $ tRaw iptables
a -> throwError $ "Invalid table parameter: " ++ a
let checkChainMay = getChainByName newChainName' table
case checkChainMay of
Just _ -> return $ buildResponse $ renderHtml $ do
editChainForm (tableName, "") newChainName' $ Just "A chain with the same name already exists"
Nothing -> do
submit <- getInputString "submit"
case submit of
"Check" -> return $ buildResponse $ renderHtml $ do
editChainForm (tableName, "") newChainName' $ Just "The name is valid"
"Submit" -> do
tryChange $ addChain tableName newChainName'
-- redir $ "/show?table=" ++ tableName ++ bookmarkForJump newChainName' Nothing
return $ buildResponse $ "ok:" ++ newChainName'
a -> throwError $ "Invalid value for 'submit' parameter: " ++ a
|
etarasov/iptadmin
|
src/IptAdmin/AddChainPage.hs
|
Haskell
|
bsd-3-clause
| 2,515
|
{-# LANGUAGE RecordWildCards #-}
module Main (main) where
import Control.Monad
import Data.Binary.Get
import qualified Data.ByteString.Lazy as BL
import Data.List
import Text.Printf
import Codec.Tracker.S3M
import Codec.Tracker.S3M.Header
import Codec.Tracker.S3M.Instrument
import Codec.Tracker.S3M.Instrument.Adlib
import Codec.Tracker.S3M.Instrument.PCM
import Codec.Tracker.S3M.Pattern
pprintInstrument :: Instrument -> IO ()
pprintInstrument Instrument{..} = do
BL.putStrLn $ BL.pack fileName
forM_ pcmSample pprintPCMSample
forM_ adlibSample pprintAdlibSample
pprintAdlibSample :: AdlibSample -> IO ()
pprintAdlibSample AdlibSample{..} = do
putStr "Adlib: "
BL.putStrLn $ BL.pack title
pprintPCMSample :: PCMSample -> IO ()
pprintPCMSample PCMSample{..} = do
putStrLn "PCM: "
BL.putStrLn $ BL.pack title
pprintHeader :: Header -> IO ()
pprintHeader Header{..} = do
putStr "Song name.......: "
BL.putStrLn $ BL.pack songName
putStrLn $ "Orders..........: " ++ show songLength
putStrLn $ "Instruments.....: " ++ show numInstruments
putStrLn $ "Patterns........: " ++ show numPatterns
putStrLn $ "Version.........: " ++ show trackerVersion
putStrLn $ "Global volume...: " ++ show globalVolume
putStrLn $ "Initial speed...: " ++ show initialSpeed
putStrLn $ "Initial tempo...: " ++ show initialTempo
putStrLn $ "Mix volume......: " ++ show mixVolume
putStrLn $ "Channel settings: " ++ show channelSettings
pprintPattern :: Pattern -> IO ()
pprintPattern Pattern{..} = do
putStrLn $ "Packed length: " ++ show (packedSize Pattern{..})
mapM_ putStrLn (map (foldr (++) ([])) (map (intersperse " | ") (map (map show) rows)))
main :: IO ()
main = do
file <- BL.getContents
let s3m = runGet getModule file
putStrLn "Header:"
putStrLn "======="
pprintHeader $ header s3m
putStrLn "<>"
print (orders s3m)
putStrLn "<>"
putStrLn "Instruments:"
putStrLn "============"
mapM_ pprintInstrument (instruments s3m)
putStrLn "<>"
putStrLn "Patterns:"
putStrLn "========="
mapM_ pprintPattern (patterns s3m)
putStrLn "<>"
|
riottracker/modfile
|
examples/readS3M.hs
|
Haskell
|
bsd-3-clause
| 2,265
|
{-# LANGUAGE ScopedTypeVariables #-}
import Data.Typeable
import Control.Exception
import GHC.IO.Exception
import System.IO
import Network
main :: IO ()
main = test `catch` ioHandle
test :: IO ()
test = do
h <- connectTo "localhost" $ PortNumber 54492
hGetLine h >>= putStrLn
(hGetLine h >>= putStrLn) `catch` ioHandle
hGetLine h >>= putStrLn
ioHandle :: IOException -> IO ()
ioHandle e = do
print $ ioe_handle e
print $ ioe_type e
print $ ioe_location e
print $ ioe_description e
print $ ioe_errno e
print $ ioe_filename e
|
YoshikuniJujo/xmpipe
|
test/exClient.hs
|
Haskell
|
bsd-3-clause
| 539
|
module Graphics.Pastel.WX
( module Graphics.Pastel.WX.Draw
, module Graphics.Pastel.WX.Test
) where
import Graphics.Pastel.WX.Draw
import Graphics.Pastel.WX.Test
|
willdonnelly/pastel
|
Graphics/Pastel/WX.hs
|
Haskell
|
bsd-3-clause
| 175
|
module GameStage
( GameStage
, gameStage
) where
import Control.Applicative
import Control.Monad
import Data.Set
import qualified Data.Map as M
import qualified Data.List as L
import Data.Unique
import qualified Class.GameScene as GS
import Class.Sprite
import KeyBind
import GlobalValue
import qualified Sound as SO
import MissScene (missScene)
import ClearScene (clearScene)
import GameStage.GameObject
import qualified GameStage.Player as P
import qualified GameStage.Bullet as B
import qualified GameStage.Enemy as E
import qualified GameStage.EnemyManager as EM
import qualified GameStage.BGManager as BG
import GameStage.Collider
data GameStage = GameStage
{ player :: P.Player
, playerBullets :: B.PlayerBullets
, enemies :: M.Map Unique E.Enemy
, enemyList :: EM.EnemyList
, enemyBullets :: M.Map Unique B.Bullet
, bgStruct :: BG.BGStruct
, time :: Integer
}
data GameOver = Continue | Miss | Clear
instance GS.GameScene GameStage where
update gv@(GV {keyset = key}) scene = do
case member QUIT key of
True -> return GS.EndScene
False -> do
newScene <- ( update >=> shoot >=> spawnEnemy >=> hitEnemy >=> hitPlayer >=> shootEnemy ) scene
case gameOver newScene of
Continue -> return $ GS.Replace newScene
Miss -> GS.dispose newScene >> GS.Replace <$> missScene
Clear -> GS.dispose newScene >> GS.Replace <$> clearScene 0
where
gameOver :: GameStage -> GameOver
gameOver GameStage { player = p
, enemies = es
, enemyList = el
}
| P.gameOver p = Miss
| L.null el && M.null es = Clear
| otherwise = Continue
hitPlayer stage@GameStage { player = p
, enemies = es
, enemyBullets = ebs
}
= do let ds = (Prelude.map gameObject . M.elems) es ++
(Prelude.map gameObject . M.elems) ebs
hits = or $ Prelude.map (within (gameObject p)) ds
return $ stage { player = P.hit hits p
}
hitEnemy stage@(GameStage { playerBullets = pbs
, enemies = es
})
= do let list = collide (B.container pbs) es
kpbs = Prelude.map fst list
kes = Prelude.map snd list
return stage { playerBullets = pbs { B.container = Prelude.foldl
(flip M.delete)
(B.container pbs)
kpbs
}
, enemies = Prelude.foldl
(flip M.delete)
es
kes
}
spawnEnemy stage@(GameStage { enemies = es
, enemyList = el
, time = t
})
= do let (newEs, newEl) = EM.spawnEnemy t el
nes <- mapM (\x -> (,) <$> newUnique <*> pure x) newEs
return $ stage { enemies = Prelude.foldl
((flip . uncurry) M.insert)
es
nes
, enemyList = newEl
}
update :: GameStage -> IO GameStage
update (GameStage p pbs es el ebs bgs time)
= return $ GameStage (P.update key p)
(B.updatePB pbs)
(M.mapMaybe E.update es)
el
(M.mapMaybe B.update ebs)
(BG.update bgs)
(time + 1)
shoot :: GameStage -> IO GameStage
shoot stage@(GameStage { player = p
, playerBullets = pbs
})
= do let ppos = (pos.gameObject) p
(bt,newP) = P.shoot (member A key) p
newPbs <- case bt of
Nothing -> return pbs
Just t -> do
SO.writeChan (sound gv) (SO.Shoot)
B.spawnPB t ppos pbs
return $ stage { player = newP
, playerBullets = newPbs
}
shootEnemy :: GameStage -> IO GameStage
shootEnemy stage@GameStage { enemies = es
, enemyBullets = ebs
}
= do
let newB = concatMap E.getBullets (M.elems es)
nebs <- mapM (\x -> (,) <$> newUnique <*> pure x) newB
return $ stage { enemyBullets = Prelude.foldl
((flip . uncurry) M.insert)
ebs
nebs
}
render (GameStage { player = p
, playerBullets = pbs
, enemies = es
, enemyBullets = ebs
, bgStruct = bgs
}) = do
BG.render bgs
P.render p
render pbs
mapM_ (render.gameObject) $ M.elems es
mapM_ (render.gameObject) $ M.elems ebs
BG.renderRim bgs
return ()
dispose GameStage { bgStruct = bgs
}
= do BG.dispose bgs
gameStage :: IO GameStage
gameStage = GameStage
<$> P.player
<*> B.playerBullets
<*> pure M.empty
<*> pure EM.enemies
<*> pure M.empty
<*> BG.load
<*> pure 0
|
c000/PaperPuppet
|
src/GameStage.hs
|
Haskell
|
bsd-3-clause
| 5,848
|
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
Utility functions on @Core@ syntax
-}
{-# LANGUAGE CPP #-}
-- | Commonly useful utilities for manipulating the Core language
module CoreUtils (
-- * Constructing expressions
mkCast,
mkTick, mkTicks, mkTickNoHNF, tickHNFArgs,
bindNonRec, needsCaseBinding,
mkAltExpr,
-- * Taking expressions apart
findDefault, addDefault, findAlt, isDefaultAlt,
mergeAlts, trimConArgs,
filterAlts, combineIdenticalAlts, refineDefaultAlt,
-- * Properties of expressions
exprType, coreAltType, coreAltsType,
exprIsDupable, exprIsTrivial, getIdFromTrivialExpr, exprIsBottom,
exprIsCheap, exprIsExpandable, exprIsCheap', CheapAppFun,
exprIsHNF, exprOkForSpeculation, exprOkForSideEffects, exprIsWorkFree,
exprIsBig, exprIsConLike,
rhsIsStatic, isCheapApp, isExpandableApp,
-- * Equality
cheapEqExpr, cheapEqExpr', eqExpr,
diffExpr, diffBinds,
-- * Eta reduction
tryEtaReduce,
-- * Manipulating data constructors and types
exprToType, exprToCoercion_maybe,
applyTypeToArgs, applyTypeToArg,
dataConRepInstPat, dataConRepFSInstPat,
isEmptyTy,
-- * Working with ticks
stripTicksTop, stripTicksTopE, stripTicksTopT,
stripTicksE, stripTicksT
) where
#include "HsVersions.h"
import CoreSyn
import PprCore
import CoreFVs( exprFreeVars )
import Var
import SrcLoc
import VarEnv
import VarSet
import Name
import Literal
import DataCon
import PrimOp
import Id
import IdInfo
import Type
import Coercion
import TyCon
import Unique
import Outputable
import TysPrim
import DynFlags
import FastString
import Maybes
import ListSetOps ( minusList )
import Platform
import Util
import Pair
import Data.Function ( on )
import Data.List
import Data.Ord ( comparing )
import OrdList
{-
************************************************************************
* *
\subsection{Find the type of a Core atom/expression}
* *
************************************************************************
-}
exprType :: CoreExpr -> Type
-- ^ Recover the type of a well-typed Core expression. Fails when
-- applied to the actual 'CoreSyn.Type' expression as it cannot
-- really be said to have a type
exprType (Var var) = idType var
exprType (Lit lit) = literalType lit
exprType (Coercion co) = coercionType co
exprType (Let bind body)
| NonRec tv rhs <- bind -- See Note [Type bindings]
, Type ty <- rhs = substTyWithUnchecked [tv] [ty] (exprType body)
| otherwise = exprType body
exprType (Case _ _ ty _) = ty
exprType (Cast _ co) = pSnd (coercionKind co)
exprType (Tick _ e) = exprType e
exprType (Lam binder expr) = mkPiType binder (exprType expr)
exprType e@(App _ _)
= case collectArgs e of
(fun, args) -> applyTypeToArgs e (exprType fun) args
exprType other = pprTrace "exprType" (pprCoreExpr other) alphaTy
coreAltType :: CoreAlt -> Type
-- ^ Returns the type of the alternatives right hand side
coreAltType (_,bs,rhs)
| any bad_binder bs = expandTypeSynonyms ty
| otherwise = ty -- Note [Existential variables and silly type synonyms]
where
ty = exprType rhs
free_tvs = tyCoVarsOfType ty
bad_binder b = b `elemVarSet` free_tvs
coreAltsType :: [CoreAlt] -> Type
-- ^ Returns the type of the first alternative, which should be the same as for all alternatives
coreAltsType (alt:_) = coreAltType alt
coreAltsType [] = panic "corAltsType"
{-
Note [Type bindings]
~~~~~~~~~~~~~~~~~~~~
Core does allow type bindings, although such bindings are
not much used, except in the output of the desugarer.
Example:
let a = Int in (\x:a. x)
Given this, exprType must be careful to substitute 'a' in the
result type (Trac #8522).
Note [Existential variables and silly type synonyms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
data T = forall a. T (Funny a)
type Funny a = Bool
f :: T -> Bool
f (T x) = x
Now, the type of 'x' is (Funny a), where 'a' is existentially quantified.
That means that 'exprType' and 'coreAltsType' may give a result that *appears*
to mention an out-of-scope type variable. See Trac #3409 for a more real-world
example.
Various possibilities suggest themselves:
- Ignore the problem, and make Lint not complain about such variables
- Expand all type synonyms (or at least all those that discard arguments)
This is tricky, because at least for top-level things we want to
retain the type the user originally specified.
- Expand synonyms on the fly, when the problem arises. That is what
we are doing here. It's not too expensive, I think.
Note that there might be existentially quantified coercion variables, too.
-}
-- Not defined with applyTypeToArg because you can't print from CoreSyn.
applyTypeToArgs :: CoreExpr -> Type -> [CoreExpr] -> Type
-- ^ A more efficient version of 'applyTypeToArg' when we have several arguments.
-- The first argument is just for debugging, and gives some context
applyTypeToArgs e op_ty args
= go op_ty args
where
go op_ty [] = op_ty
go op_ty (Type ty : args) = go_ty_args op_ty [ty] args
go op_ty (Coercion co : args) = go_ty_args op_ty [mkCoercionTy co] args
go op_ty (_ : args) | Just (_, res_ty) <- splitFunTy_maybe op_ty
= go res_ty args
go _ _ = pprPanic "applyTypeToArgs" panic_msg
-- go_ty_args: accumulate type arguments so we can instantiate all at once
go_ty_args op_ty rev_tys (Type ty : args)
= go_ty_args op_ty (ty:rev_tys) args
go_ty_args op_ty rev_tys (Coercion co : args)
= go_ty_args op_ty (mkCoercionTy co : rev_tys) args
go_ty_args op_ty rev_tys args
= go (applyTysD panic_msg_w_hdr op_ty (reverse rev_tys)) args
panic_msg_w_hdr = hang (text "applyTypeToArgs") 2 panic_msg
panic_msg = vcat [ text "Expression:" <+> pprCoreExpr e
, text "Type:" <+> ppr op_ty
, text "Args:" <+> ppr args ]
{-
************************************************************************
* *
\subsection{Attaching notes}
* *
************************************************************************
-}
-- | Wrap the given expression in the coercion safely, dropping
-- identity coercions and coalescing nested coercions
mkCast :: CoreExpr -> Coercion -> CoreExpr
mkCast e co
| ASSERT2( coercionRole co == Representational
, text "coercion" <+> ppr co <+> ptext (sLit "passed to mkCast")
<+> ppr e <+> text "has wrong role" <+> ppr (coercionRole co) )
isReflCo co
= e
mkCast (Coercion e_co) co
| isCoercionType (pSnd (coercionKind co))
-- The guard here checks that g has a (~#) on both sides,
-- otherwise decomposeCo fails. Can in principle happen
-- with unsafeCoerce
= Coercion (mkCoCast e_co co)
mkCast (Cast expr co2) co
= WARN(let { Pair from_ty _to_ty = coercionKind co;
Pair _from_ty2 to_ty2 = coercionKind co2} in
not (from_ty `eqType` to_ty2),
vcat ([ text "expr:" <+> ppr expr
, text "co2:" <+> ppr co2
, text "co:" <+> ppr co ]) )
mkCast expr (mkTransCo co2 co)
mkCast (Tick t expr) co
= Tick t (mkCast expr co)
mkCast expr co
= let Pair from_ty _to_ty = coercionKind co in
WARN( not (from_ty `eqType` exprType expr),
text "Trying to coerce" <+> text "(" <> ppr expr
$$ text "::" <+> ppr (exprType expr) <> text ")"
$$ ppr co $$ ppr (coercionType co) )
(Cast expr co)
-- | Wraps the given expression in the source annotation, dropping the
-- annotation if possible.
mkTick :: Tickish Id -> CoreExpr -> CoreExpr
mkTick t orig_expr = mkTick' id id orig_expr
where
-- Some ticks (cost-centres) can be split in two, with the
-- non-counting part having laxer placement properties.
canSplit = tickishCanSplit t && tickishPlace (mkNoCount t) /= tickishPlace t
mkTick' :: (CoreExpr -> CoreExpr) -- ^ apply after adding tick (float through)
-> (CoreExpr -> CoreExpr) -- ^ apply before adding tick (float with)
-> CoreExpr -- ^ current expression
-> CoreExpr
mkTick' top rest expr = case expr of
-- Cost centre ticks should never be reordered relative to each
-- other. Therefore we can stop whenever two collide.
Tick t2 e
| ProfNote{} <- t2, ProfNote{} <- t -> top $ Tick t $ rest expr
-- Otherwise we assume that ticks of different placements float
-- through each other.
| tickishPlace t2 /= tickishPlace t -> mkTick' (top . Tick t2) rest e
-- For annotations this is where we make sure to not introduce
-- redundant ticks.
| tickishContains t t2 -> mkTick' top rest e
| tickishContains t2 t -> orig_expr
| otherwise -> mkTick' top (rest . Tick t2) e
-- Ticks don't care about types, so we just float all ticks
-- through them. Note that it's not enough to check for these
-- cases top-level. While mkTick will never produce Core with type
-- expressions below ticks, such constructs can be the result of
-- unfoldings. We therefore make an effort to put everything into
-- the right place no matter what we start with.
Cast e co -> mkTick' (top . flip Cast co) rest e
Coercion co -> Coercion co
Lam x e
-- Always float through type lambdas. Even for non-type lambdas,
-- floating is allowed for all but the most strict placement rule.
| not (isRuntimeVar x) || tickishPlace t /= PlaceRuntime
-> mkTick' (top . Lam x) rest e
-- If it is both counting and scoped, we split the tick into its
-- two components, often allowing us to keep the counting tick on
-- the outside of the lambda and push the scoped tick inside.
-- The point of this is that the counting tick can probably be
-- floated, and the lambda may then be in a position to be
-- beta-reduced.
| canSplit
-> top $ Tick (mkNoScope t) $ rest $ Lam x $ mkTick (mkNoCount t) e
App f arg
-- Always float through type applications.
| not (isRuntimeArg arg)
-> mkTick' (top . flip App arg) rest f
-- We can also float through constructor applications, placement
-- permitting. Again we can split.
| isSaturatedConApp expr && (tickishPlace t==PlaceCostCentre || canSplit)
-> if tickishPlace t == PlaceCostCentre
then top $ rest $ tickHNFArgs t expr
else top $ Tick (mkNoScope t) $ rest $ tickHNFArgs (mkNoCount t) expr
Var x
| notFunction && tickishPlace t == PlaceCostCentre
-> orig_expr
| notFunction && canSplit
-> top $ Tick (mkNoScope t) $ rest expr
where
-- SCCs can be eliminated on variables provided the variable
-- is not a function. In these cases the SCC makes no difference:
-- the cost of evaluating the variable will be attributed to its
-- definition site. When the variable refers to a function, however,
-- an SCC annotation on the variable affects the cost-centre stack
-- when the function is called, so we must retain those.
notFunction = not (isFunTy (idType x))
Lit{}
| tickishPlace t == PlaceCostCentre
-> orig_expr
-- Catch-all: Annotate where we stand
_any -> top $ Tick t $ rest expr
mkTicks :: [Tickish Id] -> CoreExpr -> CoreExpr
mkTicks ticks expr = foldr mkTick expr ticks
isSaturatedConApp :: CoreExpr -> Bool
isSaturatedConApp e = go e []
where go (App f a) as = go f (a:as)
go (Var fun) args
= isConLikeId fun && idArity fun == valArgCount args
go (Cast f _) as = go f as
go _ _ = False
mkTickNoHNF :: Tickish Id -> CoreExpr -> CoreExpr
mkTickNoHNF t e
| exprIsHNF e = tickHNFArgs t e
| otherwise = mkTick t e
-- push a tick into the arguments of a HNF (call or constructor app)
tickHNFArgs :: Tickish Id -> CoreExpr -> CoreExpr
tickHNFArgs t e = push t e
where
push t (App f (Type u)) = App (push t f) (Type u)
push t (App f arg) = App (push t f) (mkTick t arg)
push _t e = e
-- | Strip ticks satisfying a predicate from top of an expression
stripTicksTop :: (Tickish Id -> Bool) -> Expr b -> ([Tickish Id], Expr b)
stripTicksTop p = go []
where go ts (Tick t e) | p t = go (t:ts) e
go ts other = (reverse ts, other)
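-- Illustration (added): with a predicate p that accepts both ticks,
--   stripTicksTop p (Tick t1 (Tick t2 e)) == ([t1, t2], e)
-- whereas if p rejects the outermost tick the traversal stops immediately
-- and the expression is returned unchanged.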
-- | Strip ticks satisfying a predicate from top of an expression,
-- returning the remaining expression
stripTicksTopE :: (Tickish Id -> Bool) -> Expr b -> Expr b
stripTicksTopE p = go
where go (Tick t e) | p t = go e
go other = other
-- | Strip ticks satisfying a predicate from top of an expression,
-- returning the ticks
stripTicksTopT :: (Tickish Id -> Bool) -> Expr b -> [Tickish Id]
stripTicksTopT p = go []
where go ts (Tick t e) | p t = go (t:ts) e
go ts _ = ts
-- | Completely strip ticks satisfying a predicate from an
-- expression. Note this is O(n) in the size of the expression!
stripTicksE :: (Tickish Id -> Bool) -> Expr b -> Expr b
stripTicksE p expr = go expr
where go (App e a) = App (go e) (go a)
go (Lam b e) = Lam b (go e)
go (Let b e) = Let (go_bs b) (go e)
go (Case e b t as) = Case (go e) b t (map go_a as)
go (Cast e c) = Cast (go e) c
go (Tick t e)
| p t = go e
| otherwise = Tick t (go e)
go other = other
go_bs (NonRec b e) = NonRec b (go e)
go_bs (Rec bs) = Rec (map go_b bs)
go_b (b, e) = (b, go e)
go_a (c,bs,e) = (c,bs, go e)
stripTicksT :: (Tickish Id -> Bool) -> Expr b -> [Tickish Id]
stripTicksT p expr = fromOL $ go expr
where go (App e a) = go e `appOL` go a
go (Lam _ e) = go e
go (Let b e) = go_bs b `appOL` go e
go (Case e _ _ as) = go e `appOL` concatOL (map go_a as)
go (Cast e _) = go e
go (Tick t e)
| p t = t `consOL` go e
| otherwise = go e
go _ = nilOL
go_bs (NonRec _ e) = go e
go_bs (Rec bs) = concatOL (map go_b bs)
go_b (_, e) = go e
go_a (_, _, e) = go e
{-
************************************************************************
* *
\subsection{Other expression construction}
* *
************************************************************************
-}
bindNonRec :: Id -> CoreExpr -> CoreExpr -> CoreExpr
-- ^ @bindNonRec x r b@ produces either:
--
-- > let x = r in b
--
-- or:
--
-- > case r of x { _DEFAULT_ -> b }
--
-- depending on whether we have to use a @case@ or @let@
-- binding for the expression (see 'needsCaseBinding').
-- It's used by the desugarer to avoid building bindings
-- that give Core Lint a heart attack, although actually
-- the simplifier deals with them perfectly well. See
-- also 'MkCore.mkCoreLet'
bindNonRec bndr rhs body
| needsCaseBinding (idType bndr) rhs = Case rhs bndr (exprType body) [(DEFAULT, [], body)]
| otherwise = Let (NonRec bndr rhs) body
-- | Tests whether we have to use a @case@ rather than @let@ binding for this expression
-- as per the invariants of 'CoreExpr': see "CoreSyn#let_app_invariant"
needsCaseBinding :: Type -> CoreExpr -> Bool
needsCaseBinding ty rhs = isUnliftedType ty && not (exprOkForSpeculation rhs)
-- Make a case expression instead of a let
-- These can arise either from the desugarer,
-- or from beta reductions: (\x.e) (x +# y)
mkAltExpr :: AltCon -- ^ Case alternative constructor
-> [CoreBndr] -- ^ Things bound by the pattern match
-> [Type] -- ^ The type arguments to the case alternative
-> CoreExpr
-- ^ This guy constructs the value that the scrutinee must have
-- given that you are in one particular branch of a case
mkAltExpr (DataAlt con) args inst_tys
= mkConApp con (map Type inst_tys ++ varsToCoreExprs args)
mkAltExpr (LitAlt lit) [] []
= Lit lit
mkAltExpr (LitAlt _) _ _ = panic "mkAltExpr LitAlt"
mkAltExpr DEFAULT _ _ = panic "mkAltExpr DEFAULT"
{-
************************************************************************
* *
             Operations over case alternatives
* *
************************************************************************
The default alternative must be first, if it exists at all.
This makes it easy to find, though it makes matching marginally harder.
-}
-- | Extract the default case alternative
findDefault :: [(AltCon, [a], b)] -> ([(AltCon, [a], b)], Maybe b)
findDefault ((DEFAULT,args,rhs) : alts) = ASSERT( null args ) (alts, Just rhs)
findDefault alts = (alts, Nothing)
addDefault :: [(AltCon, [a], b)] -> Maybe b -> [(AltCon, [a], b)]
addDefault alts Nothing = alts
addDefault alts (Just rhs) = (DEFAULT, [], rhs) : alts
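-- Illustration (added): on the usual shape where the DEFAULT alternative
-- comes first, findDefault and addDefault are inverses:
--   findDefault ((DEFAULT, [], rhs) : alts) == (alts, Just rhs)
--   addDefault  alts (Just rhs)             == (DEFAULT, [], rhs) : alts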
isDefaultAlt :: (AltCon, a, b) -> Bool
isDefaultAlt (DEFAULT, _, _) = True
isDefaultAlt _ = False
-- | Find the case alternative corresponding to a particular
-- constructor: panics if no such constructor exists
findAlt :: AltCon -> [(AltCon, a, b)] -> Maybe (AltCon, a, b)
-- A "Nothing" result *is* legitimate
-- See Note [Unreachable code]
findAlt con alts
= case alts of
(deflt@(DEFAULT,_,_):alts) -> go alts (Just deflt)
_ -> go alts Nothing
where
go [] deflt = deflt
go (alt@(con1,_,_) : alts) deflt
= case con `cmpAltCon` con1 of
LT -> deflt -- Missed it already; the alts are in increasing order
EQ -> Just alt
GT -> ASSERT( not (con1 == DEFAULT) ) go alts deflt
{- Note [Unreachable code]
~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible (although unusual) for GHC to find a case expression
that cannot match. For example:
data Col = Red | Green | Blue
x = Red
f v = case x of
Red -> ...
_ -> ...(case x of { Green -> e1; Blue -> e2 })...
Suppose that for some silly reason, x isn't substituted in the case
expression. (Perhaps there's a NOINLINE on it, or profiling SCC stuff
gets in the way; cf Trac #3118.)  Then the full-laziness pass might produce
this
x = Red
lvl = case x of { Green -> e1; Blue -> e2 })
f v = case x of
Red -> ...
_ -> ...lvl...
Now if x gets inlined, we won't be able to find a matching alternative
for 'Red'. That's because 'lvl' is unreachable. So rather than crashing
we generate (error "Inaccessible alternative").
Similar things can happen (augmented by GADTs) when the Simplifier
filters down the matching alternatives in Simplify.rebuildCase.
-}
---------------------------------
mergeAlts :: [(AltCon, a, b)] -> [(AltCon, a, b)] -> [(AltCon, a, b)]
-- ^ Merge alternatives preserving order; alternatives in
-- the first argument shadow ones in the second
mergeAlts [] as2 = as2
mergeAlts as1 [] = as1
mergeAlts (a1:as1) (a2:as2)
= case a1 `cmpAlt` a2 of
LT -> a1 : mergeAlts as1 (a2:as2)
EQ -> a1 : mergeAlts as1 as2 -- Discard a2
GT -> a2 : mergeAlts (a1:as1) as2
---------------------------------
trimConArgs :: AltCon -> [CoreArg] -> [CoreArg]
-- ^ Given:
--
-- > case (C a b x y) of
-- > C b x y -> ...
--
-- We want to drop the leading type argument of the scrutinee
-- leaving the arguments to match against the pattern
trimConArgs DEFAULT args = ASSERT( null args ) []
trimConArgs (LitAlt _) args = ASSERT( null args ) []
trimConArgs (DataAlt dc) args = dropList (dataConUnivTyVars dc) args
filterAlts :: TyCon -- ^ Type constructor of scrutinee's type (used to prune possibilities)
-> [Type] -- ^ And its type arguments
-> [AltCon] -- ^ 'imposs_cons': constructors known to be impossible due to the form of the scrutinee
-> [(AltCon, [Var], a)] -- ^ Alternatives
-> ([AltCon], [(AltCon, [Var], a)])
-- Returns:
-- 1. Constructors that will never be encountered by the
-- *default* case (if any). A superset of imposs_cons
-- 2. The new alternatives, trimmed by
-- a) remove imposs_cons
-- b) remove constructors which can't match because of GADTs
-- and with the DEFAULT expanded to a DataAlt if there is exactly one
-- remaining constructor that can match
--
-- NB: the final list of alternatives may be empty:
-- This is a tricky corner case. If the data type has no constructors,
-- which GHC allows, or if the imposs_cons covers all constructors (after taking
-- account of GADTs), then no alternatives can match.
--
-- If callers need to preserve the invariant that there is always at least one branch
-- in a "case" statement then they will need to manually add a dummy case branch that just
-- calls "error" or similar.
filterAlts _tycon inst_tys imposs_cons alts
= (imposs_deflt_cons, addDefault trimmed_alts maybe_deflt)
where
(alts_wo_default, maybe_deflt) = findDefault alts
alt_cons = [con | (con,_,_) <- alts_wo_default]
trimmed_alts = filterOut (impossible_alt inst_tys) alts_wo_default
imposs_deflt_cons = nub (imposs_cons ++ alt_cons)
-- "imposs_deflt_cons" are handled
-- EITHER by the context,
-- OR by a non-DEFAULT branch in this case expression.
impossible_alt :: [Type] -> (AltCon, a, b) -> Bool
impossible_alt _ (con, _, _) | con `elem` imposs_cons = True
impossible_alt inst_tys (DataAlt con, _, _) = dataConCannotMatch inst_tys con
impossible_alt _ _ = False
refineDefaultAlt :: [Unique] -> TyCon -> [Type]
                 -> [AltCon]   -- Constructors that cannot match the DEFAULT (if any)
-> [CoreAlt]
-> (Bool, [CoreAlt])
-- Refine the default alternative to a DataAlt,
-- if there is a unique way to do so
refineDefaultAlt us tycon tys imposs_deflt_cons all_alts
| (DEFAULT,_,rhs) : rest_alts <- all_alts
, isAlgTyCon tycon -- It's a data type, tuple, or unboxed tuples.
, not (isNewTyCon tycon) -- We can have a newtype, if we are just doing an eval:
-- case x of { DEFAULT -> e }
-- and we don't want to fill in a default for them!
, Just all_cons <- tyConDataCons_maybe tycon
, let imposs_data_cons = [con | DataAlt con <- imposs_deflt_cons] -- We now know it's a data type
impossible con = con `elem` imposs_data_cons || dataConCannotMatch tys con
= case filterOut impossible all_cons of
-- Eliminate the default alternative
-- altogether if it can't match:
[] -> (False, rest_alts)
-- It matches exactly one constructor, so fill it in:
[con] -> (True, mergeAlts rest_alts [(DataAlt con, ex_tvs ++ arg_ids, rhs)])
-- We need the mergeAlts to keep the alternatives in the right order
where
(ex_tvs, arg_ids) = dataConRepInstPat us con tys
-- It matches more than one, so do nothing
_ -> (False, all_alts)
| debugIsOn, isAlgTyCon tycon, null (tyConDataCons tycon)
, not (isFamilyTyCon tycon || isAbstractTyCon tycon)
-- Check for no data constructors
-- This can legitimately happen for abstract types and type families,
-- so don't report that
= (False, all_alts)
| otherwise -- The common case
= (False, all_alts)
{- Note [Combine identical alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If several alternatives are identical, merge them into a single
DEFAULT alternative. I've occasionally seen this making a big
difference:
case e of =====> case e of
C _ -> f x D v -> ....v....
D v -> ....v.... DEFAULT -> f x
DEFAULT -> f x
The point is that we merge common RHSs, at least for the DEFAULT case.
[One could do something more elaborate but I've never seen it needed.]
To avoid an expensive test, we just merge branches equal to the *first*
alternative; this picks up the common cases
a) all branches equal
b) some branches equal to the DEFAULT (which occurs first)
The case where the Combine Identical Alternatives transformation showed up
was like this (base/Foreign/C/Err/Error.hs):
x | p `is` 1 -> e1
| p `is` 2 -> e2
...etc...
where @is@ was something like
p `is` n = p /= (-1) && p == n
This gave rise to a horrible sequence of cases
case p of
(-1) -> $j p
1 -> e1
DEFAULT -> $j p
and similarly in cascade for all the join points!
NB: it's important that all this is done in [InAlt], *before* we work
on the alternatives themselves, because Simplify.simplAlt may zap the
occurrence info on the binders in the alternatives, which in turn
defeats combineIdenticalAlts (see Trac #7360).
Note [Care with impossible-constructors when combining alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have (Trac #10538)
data T = A | B | C | D
case x::T of (Imposs-default-cons {A,B})
DEFAULT -> e1
A -> e2
B -> e1
When calling combineIdenticalAlts, we'll have computed that the
"impossible constructors" for the DEFAULT alt is {A,B}, since if x is
A or B we'll take the other alternatives. But suppose we combine B
into the DEFAULT, to get
case x::T of (Imposs-default-cons {A})
DEFAULT -> e1
A -> e2
Then we must be careful to trim the impossible constructors to just {A},
else we risk compiling 'e1' wrong!
Not only that, but we take care when there is no DEFAULT beforehand,
because we are introducing one. Consider
case x of (Imposs-default-cons {A,B,C})
A -> e1
B -> e2
C -> e1
Then when combining the A and C alternatives we get
case x of (Imposs-default-cons {B})
DEFAULT -> e1
B -> e2
Note that we have a new DEFAULT branch that we didn't have before. So
we need delete from the "impossible-default-constructors" all the
known-con alternatives that we have eliminated. (In Trac #11172 we
missed the first one.)
-}
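-- The following is an illustrative, self-contained sketch (hypothetical
-- names, not part of this module) of the transformation described in
-- Note [Combine identical alternatives], on a simplified alternative
-- representation: Nothing plays the role of DEFAULT, Just tag is a known
-- constructor, and the RHS is any value we can compare for equality.
-- The real combineIdenticalAlts below additionally requires the binders
-- to be dead and trims the impossible-constructor set, as the Notes above
-- explain.
combineToy :: Eq rhs => [(Maybe Int, rhs)] -> [(Maybe Int, rhs)]
combineToy ((_con1, rhs1) : rest)
  | not (null same)                -- at least one other branch shares rhs1
  = (Nothing, rhs1) : different    -- the merged DEFAULT comes first
  where
    same      = [alt | alt@(_, rhs) <- rest, rhs == rhs1]
    different = [alt | alt@(_, rhs) <- rest, rhs /= rhs1]
combineToy alts = alts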
combineIdenticalAlts :: [AltCon] -- Constructors that cannot match DEFAULT
-> [CoreAlt]
-> (Bool, -- True <=> something happened
                         [AltCon],   -- New constructors that cannot match DEFAULT
[CoreAlt]) -- New alternatives
-- See Note [Combine identical alternatives]
-- True <=> we did some combining, result is a single DEFAULT alternative
combineIdenticalAlts imposs_deflt_cons ((con1,bndrs1,rhs1) : rest_alts)
| all isDeadBinder bndrs1 -- Remember the default
, not (null elim_rest) -- alternative comes first
= (True, imposs_deflt_cons', deflt_alt : filtered_rest)
where
(elim_rest, filtered_rest) = partition identical_to_alt1 rest_alts
deflt_alt = (DEFAULT, [], mkTicks (concat tickss) rhs1)
-- See Note [Care with impossible-constructors when combining alternatives]
imposs_deflt_cons' = imposs_deflt_cons `minusList` elim_cons
elim_cons = elim_con1 ++ map fstOf3 elim_rest
elim_con1 = case con1 of -- Don't forget con1!
                  DEFAULT -> []  -- See Note [Care with impossible-constructors when combining alternatives]
_ -> [con1]
cheapEqTicked e1 e2 = cheapEqExpr' tickishFloatable e1 e2
identical_to_alt1 (_con,bndrs,rhs)
= all isDeadBinder bndrs && rhs `cheapEqTicked` rhs1
tickss = map (stripTicksT tickishFloatable . thdOf3) elim_rest
combineIdenticalAlts imposs_cons alts
= (False, imposs_cons, alts)
{- *********************************************************************
* *
exprIsTrivial
* *
************************************************************************
Note [exprIsTrivial]
~~~~~~~~~~~~~~~~~~~~
@exprIsTrivial@ is true of expressions we are unconditionally happy to
duplicate; simple variables and constants, and type
applications. Note that primop Ids are treated like any other
variable here; see Note [Variables are trivial].
Note [Variables are trivial]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
There used to be a gruesome test for (hasNoBinding v) in the
Var case:
exprIsTrivial (Var v) | hasNoBinding v = idArity v == 0
The idea here is that a constructor worker, like $wJust, is
really short for (\x -> $wJust x), because $wJust has no binding.
So it should be treated like a lambda. Ditto unsaturated primops.
But now constructor workers are not "have-no-binding" Ids. And
completely un-applied primops and foreign-call Ids are sufficiently
rare that I plan to allow them to be duplicated and put up with
saturating them.
Note [Tick trivial]
~~~~~~~~~~~~~~~~~~~
Ticks are only trivial if they are pure annotations. If we treat
"tick<n> x" as trivial, it will be inlined inside lambdas and the
entry count will be skewed, for example. Furthermore "scc<n> x" will
turn into just "x" in mkTick.
Note [Empty case is trivial]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The expression (case (x::Int) Bool of {}) is just a type-changing
case used when we are sure that 'x' will not return. See
Note [Empty case alternatives] in CoreSyn.
If the scrutinee is trivial, then so is the whole expression; and the
CoreToSTG pass in fact drops the case expression leaving only the
scrutinee.
Having more trivial expressions is good. Moreover, if we don't treat
it as trivial we may land up with let-bindings like
let v = case x of {} in ...
and after CoreToSTG that gives
let v = x in ...
and that confuses the code generator (Trac #11155). So best to kill
it off at source.
-}
exprIsTrivial :: CoreExpr -> Bool
exprIsTrivial (Var _) = True -- See Note [Variables are trivial]
exprIsTrivial (Type _) = True
exprIsTrivial (Coercion _) = True
exprIsTrivial (Lit lit) = litIsTrivial lit
exprIsTrivial (App e arg) = not (isRuntimeArg arg) && exprIsTrivial e
exprIsTrivial (Tick t e) = not (tickishIsCode t) && exprIsTrivial e
-- See Note [Tick trivial]
exprIsTrivial (Cast e _) = exprIsTrivial e
exprIsTrivial (Lam b body) = not (isRuntimeVar b) && exprIsTrivial body
exprIsTrivial (Case e _ _ []) = exprIsTrivial e -- See Note [Empty case is trivial]
exprIsTrivial _ = False
{-
When substituting in a breakpoint we need to strip away the type cruft
from a trivial expression and get back to the Id. The invariant is
that the expression we're substituting was originally trivial
according to exprIsTrivial.
-}
getIdFromTrivialExpr :: CoreExpr -> Id
getIdFromTrivialExpr e = go e
where go (Var v) = v
go (App f t) | not (isRuntimeArg t) = go f
go (Tick t e) | not (tickishIsCode t) = go e
go (Cast e _) = go e
go (Lam b e) | not (isRuntimeVar b) = go e
go e = pprPanic "getIdFromTrivialExpr" (ppr e)
{-
exprIsBottom is a very cheap and cheerful function; it may return
False for bottoming expressions, but it never costs much to ask. See
also CoreArity.exprBotStrictness_maybe, but that's a bit more
expensive.
-}
exprIsBottom :: CoreExpr -> Bool
-- See Note [Bottoming expressions]
exprIsBottom e
| isEmptyTy (exprType e)
= True
| otherwise
= go 0 e
where
go n (Var v) = isBottomingId v && n >= idArity v
go n (App e a) | isTypeArg a = go n e
| otherwise = go (n+1) e
go n (Tick _ e) = go n e
go n (Cast e _) = go n e
go n (Let _ e) = go n e
go n (Lam v e) | isTyVar v = go n e
go _ (Case _ _ _ alts) = null alts
-- See Note [Empty case alternatives] in CoreSyn
go _ _ = False
{- Note [Bottoming expressions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A bottoming expression is guaranteed to diverge, or raise an
exception. We can test for it in two different ways, and exprIsBottom
checks for both of these situations:
* Visibly-bottom computations. For example
(error Int "Hello")
is visibly bottom. The strictness analyser also finds out if
a function diverges or raises an exception, and puts that info
in its strictness signature.
* Empty types. If a type is empty, its only inhabitant is bottom.
For example:
data T
f :: T -> Bool
f = \(x:t). case x of Bool {}
Since T has no data constructors, the case alternatives are of course
empty. However note that 'x' is not bound to a visibly-bottom value;
it's the *type* that tells us it's going to diverge.
A GADT may also be empty even though it has constructors:
data T a where
T1 :: a -> T Bool
T2 :: T Int
...(case (x::T Char) of {})...
Here (T Char) is uninhabited. A more realistic case is (Int ~ Bool),
which is likewise uninhabited.
************************************************************************
* *
exprIsDupable
* *
************************************************************************
Note [exprIsDupable]
~~~~~~~~~~~~~~~~~~~~
@exprIsDupable@ is true of expressions that can be duplicated at a modest
cost in code size. This will only happen in different case
branches, so there's no issue about duplicating work.
That is, exprIsDupable returns True of (f x) even if
f is very very expensive to call.
Its only purpose is to avoid fruitless let-binding
and then inlining of case join points
-}
exprIsDupable :: DynFlags -> CoreExpr -> Bool
exprIsDupable dflags e
= isJust (go dupAppSize e)
where
go :: Int -> CoreExpr -> Maybe Int
go n (Type {}) = Just n
go n (Coercion {}) = Just n
go n (Var {}) = decrement n
go n (Tick _ e) = go n e
go n (Cast e _) = go n e
go n (App f a) | Just n' <- go n a = go n' f
go n (Lit lit) | litIsDupable dflags lit = decrement n
go _ _ = Nothing
decrement :: Int -> Maybe Int
decrement 0 = Nothing
decrement n = Just (n-1)
dupAppSize :: Int
dupAppSize = 8 -- Size of term we are prepared to duplicate
-- This is *just* big enough to make test MethSharing
-- inline enough join points. Really it should be
-- smaller, and could be if we fixed Trac #4960.
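-- Illustrative, self-contained sketch (hypothetical names) of the
-- fuel-in-Maybe pattern that exprIsDupable uses above: walk a toy tree,
-- spend one unit of budget per leaf, and succeed only if the budget never
-- runs out.
data ToyTree = ToyLeaf | ToyNode ToyTree ToyTree
fitsInBudget :: Int -> ToyTree -> Bool
fitsInBudget budget t = case go budget t of
                          Just _  -> True
                          Nothing -> False
  where
    go n ToyLeaf       = spend n
    go n (ToyNode l r) = go n l >>= \n' -> go n' r
    spend 0 = Nothing
    spend n = Just (n - 1)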
{-
************************************************************************
* *
exprIsCheap, exprIsExpandable
* *
************************************************************************
Note [exprIsWorkFree]
~~~~~~~~~~~~~~~~~~~~~
exprIsWorkFree is used when deciding whether to inline something; we
don't inline it if doing so might duplicate work, by peeling off a
complete copy of the expression. Here we do not want even to
duplicate a primop (Trac #5623):
eg let x = a #+ b in x +# x
we do not want to inline/duplicate x
Previously we were a bit more liberal, which led to the primop-duplicating
problem. However, being more conservative did lead to a big regression in
one nofib benchmark, wheel-sieve1. The situation looks like this:
let noFactor_sZ3 :: GHC.Types.Int -> GHC.Types.Bool
noFactor_sZ3 = case s_adJ of _ { GHC.Types.I# x_aRs ->
case GHC.Prim.<=# x_aRs 2 of _ {
GHC.Types.False -> notDivBy ps_adM qs_adN;
GHC.Types.True -> lvl_r2Eb }}
go = \x. ...(noFactor (I# y))....(go x')...
The function 'noFactor' is heap-allocated and then called. Turns out
that 'notDivBy' is strict in its THIRD arg, but that is invisible to
the caller of noFactor, which therefore cannot do w/w and
heap-allocates noFactor's argument. At the moment (May 12) we are just
going to put up with this, because the previous more aggressive inlining
(which treated 'noFactor' as work-free) was duplicating primops, which
in turn was making inner loops of array calculations runs slow (#5623)
-}
exprIsWorkFree :: CoreExpr -> Bool
-- See Note [exprIsWorkFree]
exprIsWorkFree e = go 0 e
where -- n is the number of value arguments
go _ (Lit {}) = True
go _ (Type {}) = True
go _ (Coercion {}) = True
go n (Cast e _) = go n e
go n (Case scrut _ _ alts) = foldl (&&) (exprIsWorkFree scrut)
[ go n rhs | (_,_,rhs) <- alts ]
-- See Note [Case expressions are work-free]
go _ (Let {}) = False
go n (Var v) = isCheapApp v n
go n (Tick t e) | tickishCounts t = False
| otherwise = go n e
go n (Lam x e) | isRuntimeVar x = n==0 || go (n-1) e
| otherwise = go n e
go n (App f e) | isRuntimeArg e = exprIsWorkFree e && go (n+1) f
| otherwise = go n f
{-
Note [Case expressions are work-free]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Are case-expressions work-free? Consider
let v = case x of (p,q) -> p
go = \y -> ...case v of ...
Should we inline 'v' at its use site inside the loop? At the moment
we do. I experimented with saying that case are *not* work-free, but
that increased allocation slightly. It's a fairly small effect, and at
the moment we go for the slightly more aggressive version which treats
(case x of ....) as work-free if the alternatives are.
Note [exprIsCheap] See also Note [Interaction of exprIsCheap and lone variables]
~~~~~~~~~~~~~~~~~~ in CoreUnfold.hs
@exprIsCheap@ looks at a Core expression and returns True if
it is obviously in weak head normal form, or is cheap to get to WHNF.
[Note that that's not the same as exprIsDupable; an expression might be
big, and hence not dupable, but still cheap.]
By ``cheap'' we mean a computation we're willing to:
push inside a lambda, or
inline at more than one place
That might mean it gets evaluated more than once, instead of being
shared. The main examples of things which aren't WHNF but are
``cheap'' are:
* case e of
pi -> ei
(where e, and all the ei are cheap)
* let x = e in b
(where e and b are cheap)
* op x1 ... xn
(where op is a cheap primitive operator)
* error "foo"
(because we are happy to substitute it inside a lambda)
Notice that a variable is considered 'cheap': we can push it inside a lambda,
because sharing will make sure it is only evaluated once.
Note [exprIsCheap and exprIsHNF]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Note that exprIsHNF does not imply exprIsCheap. Eg
let x = fac 20 in Just x
This responds True to exprIsHNF (you can discard a seq), but
False to exprIsCheap.
-}
exprIsCheap :: CoreExpr -> Bool
exprIsCheap = exprIsCheap' isCheapApp
exprIsExpandable :: CoreExpr -> Bool
exprIsExpandable = exprIsCheap' isExpandableApp -- See Note [CONLIKE pragma] in BasicTypes
exprIsCheap' :: CheapAppFun -> CoreExpr -> Bool
exprIsCheap' _ (Lit _) = True
exprIsCheap' _ (Type _) = True
exprIsCheap' _ (Coercion _) = True
exprIsCheap' _ (Var _) = True
exprIsCheap' good_app (Cast e _) = exprIsCheap' good_app e
exprIsCheap' good_app (Lam x e) = isRuntimeVar x
|| exprIsCheap' good_app e
exprIsCheap' good_app (Case e _ _ alts) = exprIsCheap' good_app e &&
and [exprIsCheap' good_app rhs | (_,_,rhs) <- alts]
-- Experimentally, treat (case x of ...) as cheap
-- (and case __coerce x etc.)
-- This improves arities of overloaded functions where
-- there is only dictionary selection (no construction) involved
exprIsCheap' good_app (Tick t e)
| tickishCounts t = False
| otherwise = exprIsCheap' good_app e
-- never duplicate counting ticks. If we get this wrong, then
-- HPC's entry counts will be off (check test in
-- libraries/hpc/tests/raytrace)
exprIsCheap' good_app (Let (NonRec _ b) e)
= exprIsCheap' good_app b && exprIsCheap' good_app e
exprIsCheap' good_app (Let (Rec prs) e)
= all (exprIsCheap' good_app . snd) prs && exprIsCheap' good_app e
exprIsCheap' good_app other_expr -- Applications and variables
= go other_expr []
where
-- Accumulate value arguments, then decide
go (Cast e _) val_args = go e val_args
go (App f a) val_args | isRuntimeArg a = go f (a:val_args)
| otherwise = go f val_args
go (Var _) [] = True
-- Just a type application of a variable
-- (f t1 t2 t3) counts as WHNF
        -- This case is probably handled by the good_app case
-- below, which should have a case for n=0, but putting
-- it here too is belt and braces; and it's such a common
-- case that checking for null directly seems like a
-- good plan
go (Var f) args
| good_app f (length args) -- Typically holds of data constructor applications
= go_pap args -- E.g. good_app = isCheapApp below
| otherwise
= case idDetails f of
RecSelId {} -> go_sel args
ClassOpId {} -> go_sel args
PrimOpId op -> go_primop op args
_ | isBottomingId f -> True
| otherwise -> False
-- Application of a function which
-- always gives bottom; we treat this as cheap
-- because it certainly doesn't need to be shared!
go (Tick t e) args
| not (tickishCounts t) -- don't duplicate counting ticks, see above
= go e args
go _ _ = False
--------------
go_pap args = all (exprIsCheap' good_app) args
-- Used to be "all exprIsTrivial args" due to concerns about
-- duplicating nested constructor applications, but see #4978.
-- The principle here is that
-- let x = a +# b in c *# x
-- should behave equivalently to
-- c *# (a +# b)
-- Since lets with cheap RHSs are accepted,
-- so should paps with cheap arguments
--------------
go_primop op args = primOpIsCheap op && all (exprIsCheap' good_app) args
-- In principle we should worry about primops
-- that return a type variable, since the result
-- might be applied to something, but I'm not going
-- to bother to check the number of args
--------------
go_sel [arg] = exprIsCheap' good_app arg -- I'm experimenting with making record selection
go_sel _ = False -- look cheap, so we will substitute it inside a
-- lambda. Particularly for dictionary field selection.
-- BUT: Take care with (sel d x)! The (sel d) might be cheap, but
-- there's no guarantee that (sel d x) will be too. Hence (n_val_args == 1)
-------------------------------------
type CheapAppFun = Id -> Int -> Bool
-- Is an application of this function to n *value* args
-- always cheap, assuming the arguments are cheap?
-- Mainly true of partial applications, data constructors,
-- and of course true if the number of args is zero
isCheapApp :: CheapAppFun
isCheapApp fn n_val_args
= isDataConWorkId fn
|| n_val_args == 0
|| n_val_args < idArity fn
isExpandableApp :: CheapAppFun
isExpandableApp fn n_val_args
= isConLikeId fn
|| n_val_args < idArity fn
|| go n_val_args (idType fn)
where
-- See if all the arguments are PredTys (implicit params or classes)
-- If so we'll regard it as expandable; see Note [Expandable overloadings]
-- This incidentally picks up the (n_val_args = 0) case
go 0 _ = True
go n_val_args ty
| Just (bndr, ty) <- splitPiTy_maybe ty
= caseBinder bndr
(\_tv -> go n_val_args ty)
(\bndr_ty -> isPredTy bndr_ty && go (n_val_args-1) ty)
| otherwise
= False
{-
Note [Expandable overloadings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose the user wrote this
{-# RULE forall x. foo (negate x) = h x #-}
f x = ....(foo (negate x))....
He'd expect the rule to fire. But since negate is overloaded, we might
get this:
f = \d -> let n = negate d in \x -> ...foo (n x)...
So we treat the application of a function (negate in this case) to a
*dictionary* as expandable. In effect, every function is CONLIKE when
it's applied only to dictionaries.
************************************************************************
* *
exprOkForSpeculation
* *
************************************************************************
-}
-----------------------------
-- | 'exprOkForSpeculation' returns True of an expression that is:
--
-- * Safe to evaluate even if normal order eval might not
-- evaluate the expression at all, or
--
-- * Safe /not/ to evaluate even if normal order would do so
--
-- It is usually called on arguments of unlifted type, but not always
-- In particular, Simplify.rebuildCase calls it on lifted types
-- when a 'case' is a plain 'seq'. See the example in
-- Note [exprOkForSpeculation: case expressions] below
--
-- Precisely, it returns @True@ iff:
-- a) The expression guarantees to terminate,
-- b) soon,
-- c) without causing a write side effect (e.g. writing a mutable variable)
-- d) without throwing a Haskell exception
-- e) without risking an unchecked runtime exception (array out of bounds,
-- divide by zero)
--
-- For @exprOkForSideEffects@ the list is the same, but omitting (e).
--
-- Note that
-- exprIsHNF implies exprOkForSpeculation
-- exprOkForSpeculation implies exprOkForSideEffects
--
-- See Note [PrimOp can_fail and has_side_effects] in PrimOp
-- and Note [Implementation: how can_fail/has_side_effects affect transformations]
--
-- As an example of the considerations in this test, consider:
--
-- > let x = case y# +# 1# of { r# -> I# r# }
-- > in E
--
-- being translated to:
--
-- > case y# +# 1# of { r# ->
-- > let x = I# r#
-- > in E
-- > }
--
-- We can only do this if the @y + 1@ is ok for speculation: it has no
-- side effects, and can't diverge or raise an exception.
exprOkForSpeculation, exprOkForSideEffects :: Expr b -> Bool
exprOkForSpeculation = expr_ok primOpOkForSpeculation
exprOkForSideEffects = expr_ok primOpOkForSideEffects
-- Polymorphic in binder type
-- There is one call at a non-Id binder type, in SetLevels
expr_ok :: (PrimOp -> Bool) -> Expr b -> Bool
expr_ok _ (Lit _) = True
expr_ok _ (Type _) = True
expr_ok _ (Coercion _) = True
expr_ok primop_ok (Var v) = app_ok primop_ok v []
expr_ok primop_ok (Cast e _) = expr_ok primop_ok e
-- Tick annotations that *tick* cannot be speculated, because these
-- are meant to identify whether or not (and how often) the particular
-- source expression was evaluated at runtime.
expr_ok primop_ok (Tick tickish e)
| tickishCounts tickish = False
| otherwise = expr_ok primop_ok e
expr_ok primop_ok (Case e _ _ alts)
= expr_ok primop_ok e -- Note [exprOkForSpeculation: case expressions]
&& all (\(_,_,rhs) -> expr_ok primop_ok rhs) alts
&& altsAreExhaustive alts -- Note [Exhaustive alts]
expr_ok primop_ok other_expr
= case collectArgs other_expr of
(expr, args) | Var f <- stripTicksTopE (not . tickishCounts) expr
-> app_ok primop_ok f args
_ -> False
-----------------------------
app_ok :: (PrimOp -> Bool) -> Id -> [Expr b] -> Bool
app_ok primop_ok fun args
= case idDetails fun of
DFunId new_type -> not new_type
-- DFuns terminate, unless the dict is implemented
-- with a newtype in which case they may not
DataConWorkId {} -> True
-- The strictness of the constructor has already
-- been expressed by its "wrapper", so we don't need
-- to take the arguments into account
PrimOpId op
| isDivOp op -- Special case for dividing operations that fail
, [arg1, Lit lit] <- args -- only if the divisor is zero
-> not (isZeroLit lit) && expr_ok primop_ok arg1
-- Often there is a literal divisor, and this
                  -- Often there is a literal divisor, and this
                  -- can get rid of a thunk in an inner loop
| DataToTagOp <- op -- See Note [dataToTag speculation]
-> True
| otherwise
-> primop_ok op -- A bit conservative: we don't really need
&& all (expr_ok primop_ok) args -- to care about lazy arguments, but this is easy
_other -> isUnliftedType (idType fun) -- c.f. the Var case of exprIsHNF
|| idArity fun > n_val_args -- Partial apps
|| (n_val_args == 0 &&
isEvaldUnfolding (idUnfolding fun)) -- Let-bound values
where
n_val_args = valArgCount args
-----------------------------
altsAreExhaustive :: [Alt b] -> Bool
-- True  <=> the case alternatives are definitely exhaustive
-- False <=> they may or may not be
altsAreExhaustive []
= False -- Should not happen
altsAreExhaustive ((con1,_,_) : alts)
= case con1 of
DEFAULT -> True
LitAlt {} -> False
DataAlt c -> 1 + length alts == tyConFamilySize (dataConTyCon c)
-- It is possible to have an exhaustive case that does not
-- enumerate all constructors, notably in a GADT match, but
-- we behave conservatively here -- I don't think it's important
-- enough to deserve special treatment
-- | True of dyadic operators that can fail only if the second arg is zero!
isDivOp :: PrimOp -> Bool
-- This function probably belongs in PrimOp, or even in
-- an automagically generated file.. but it's such a
-- special case I thought I'd leave it here for now.
isDivOp IntQuotOp = True
isDivOp IntRemOp = True
isDivOp WordQuotOp = True
isDivOp WordRemOp = True
isDivOp FloatDivOp = True
isDivOp DoubleDivOp = True
isDivOp _ = False
{-
Note [exprOkForSpeculation: case expressions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's always sound for exprOkForSpeculation to return False, and we
don't want it to take too long, so it bales out on complicated-looking
terms. Notably lets, which can be stacked very deeply; and in any
case the argument of exprOkForSpeculation is usually in a strict context,
so any lets will have been floated away.
However, we keep going on case-expressions. An example like this one
showed up in DPH code (Trac #3717):
foo :: Int -> Int
foo 0 = 0
foo n = (if n < 5 then 1 else 2) `seq` foo (n-1)
If exprOkForSpeculation doesn't look through case expressions, you get this:
T.$wfoo =
\ (ww :: GHC.Prim.Int#) ->
case ww of ds {
__DEFAULT -> case (case <# ds 5 of _ {
GHC.Types.False -> lvl1;
GHC.Types.True -> lvl})
of _ { __DEFAULT ->
T.$wfoo (GHC.Prim.-# ds_XkE 1) };
0 -> 0
}
The inner case is redundant, and should be nuked.
Note [Exhaustive alts]
~~~~~~~~~~~~~~~~~~~~~~
We might have something like
case x of {
A -> ...
_ -> ...(case x of { B -> ...; C -> ... })...
Here, the inner case is fine, because the A alternative
can't happen, but it's not ok to float the inner case outside
the outer one (even if we know x is evaluated outside), because
then it would be non-exhaustive. See Trac #5453.
Similarly, this is a valid program (albeit a slightly dodgy one)
let v = case x of { B -> ...; C -> ... }
in case x of
A -> ...
_ -> ...v...v....
But we don't want to speculate the v binding.
One could try to be clever, but the easy fix is simply to regard
a non-exhaustive case as *not* okForSpeculation.
Note [dataToTag speculation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Is this OK?
f x = let v::Int# = dataToTag# x
in ...
We say "yes", even though 'x' may not be evaluated. Reasons
* dataToTag#'s strictness means that its argument often will be
evaluated, but FloatOut makes that temporarily untrue
case x of y -> let v = dataToTag# y in ...
-->
case x of y -> let v = dataToTag# x in ...
Note that we look at 'x' instead of 'y' (this is to improve
floating in FloatOut). So Lint complains.
Moreover, it really *might* improve floating to let the
v-binding float out
* CorePrep makes sure dataToTag#'s argument is evaluated, just
before code gen. Until then, it's not guaranteed
************************************************************************
* *
exprIsHNF, exprIsConLike
* *
************************************************************************
-}
-- Note [exprIsHNF] See also Note [exprIsCheap and exprIsHNF]
-- ~~~~~~~~~~~~~~~~
-- | exprIsHNF returns true for expressions that are certainly /already/
-- evaluated to /head/ normal form. This is used to decide whether it's ok
-- to change:
--
-- > case x of _ -> e
--
-- into:
--
-- > e
--
-- and to decide whether it's safe to discard a 'seq'.
--
-- So, it does /not/ treat variables as evaluated, unless they say they are.
-- However, it /does/ treat partial applications and constructor applications
-- as values, even if their arguments are non-trivial, provided the argument
-- type is lifted. For example, both of these are values:
--
-- > (:) (f x) (map f xs)
-- > map (...redex...)
--
-- because 'seq' on such things completes immediately.
--
-- For unlifted argument types, we have to be careful:
--
-- > C (f x :: Int#)
--
-- Suppose @f x@ diverges; then @C (f x)@ is not a value. However this can't
-- happen: see "CoreSyn#let_app_invariant". This invariant states that arguments of
-- unboxed type must be ok-for-speculation (or trivial).
exprIsHNF :: CoreExpr -> Bool -- True => Value-lambda, constructor, PAP
exprIsHNF = exprIsHNFlike isDataConWorkId isEvaldUnfolding
-- | Similar to 'exprIsHNF' but includes CONLIKE functions as well as
-- data constructors. Conlike arguments are considered interesting by the
-- inliner.
exprIsConLike :: CoreExpr -> Bool -- True => lambda, conlike, PAP
exprIsConLike = exprIsHNFlike isConLikeId isConLikeUnfolding
-- | Returns true for values or value-like expressions. These are lambdas,
-- constructors / CONLIKE functions (as determined by the function argument)
-- or PAPs.
--
exprIsHNFlike :: (Var -> Bool) -> (Unfolding -> Bool) -> CoreExpr -> Bool
exprIsHNFlike is_con is_con_unf = is_hnf_like
where
is_hnf_like (Var v) -- NB: There are no value args at this point
= is_con v -- Catches nullary constructors,
-- so that [] and () are values, for example
|| idArity v > 0 -- Catches (e.g.) primops that don't have unfoldings
|| is_con_unf (idUnfolding v)
-- Check the thing's unfolding; it might be bound to a value
-- We don't look through loop breakers here, which is a bit conservative
-- but otherwise I worry that if an Id's unfolding is just itself,
-- we could get an infinite loop
is_hnf_like (Lit _) = True
is_hnf_like (Type _) = True -- Types are honorary Values;
-- we don't mind copying them
is_hnf_like (Coercion _) = True -- Same for coercions
is_hnf_like (Lam b e) = isRuntimeVar b || is_hnf_like e
is_hnf_like (Tick tickish e) = not (tickishCounts tickish)
&& is_hnf_like e
-- See Note [exprIsHNF Tick]
is_hnf_like (Cast e _) = is_hnf_like e
is_hnf_like (App e a)
| isValArg a = app_is_value e 1
| otherwise = is_hnf_like e
is_hnf_like (Let _ e) = is_hnf_like e -- Lazy let(rec)s don't affect us
is_hnf_like _ = False
-- There is at least one value argument
-- 'n' is number of value args to which the expression is applied
app_is_value :: CoreExpr -> Int -> Bool
app_is_value (Var fun) n_val_args
= idArity fun > n_val_args -- Under-applied function
|| is_con fun -- or constructor-like
app_is_value (Tick _ f) nva = app_is_value f nva
app_is_value (Cast f _) nva = app_is_value f nva
app_is_value (App f a) nva
| isValArg a = app_is_value f (nva + 1)
| otherwise = app_is_value f nva
app_is_value _ _ = False
{-
Note [exprIsHNF Tick]
~~~~~~~~~~~~~~~~~~~~~
We can discard source annotations on HNFs as long as they aren't
tick-like:
scc c (\x . e) => \x . e
scc c (C x1..xn) => C x1..xn
So we regard these as HNFs.  Tick annotations that count (tickishCounts)
are not regarded as HNF even if the expression they surround is, because the
tick is there to tell us that the expression was evaluated, so we
don't want to discard a seq on it.
-}
{-
************************************************************************
* *
Instantiating data constructors
* *
************************************************************************
These InstPat functions go here to avoid circularity between DataCon and Id
-}
dataConRepInstPat :: [Unique] -> DataCon -> [Type] -> ([TyVar], [Id])
dataConRepFSInstPat :: [FastString] -> [Unique] -> DataCon -> [Type] -> ([TyVar], [Id])
dataConRepInstPat = dataConInstPat (repeat ((fsLit "ipv")))
dataConRepFSInstPat = dataConInstPat
dataConInstPat :: [FastString] -- A long enough list of FSs to use for names
-> [Unique] -- An equally long list of uniques, at least one for each binder
-> DataCon
-> [Type] -- Types to instantiate the universally quantified tyvars
-> ([TyVar], [Id]) -- Return instantiated variables
-- dataConInstPat fss us con inst_tys returns a pair
-- (ex_tvs, arg_ids),
--
-- ex_tvs are intended to be used as binders for existential type args
--
--   arg_ids are intended to be used as binders for value arguments,
-- and their types have been instantiated with inst_tys and ex_tys
-- The arg_ids include both evidence and
-- programmer-specified arguments (both after rep-ing)
--
-- Example.
-- The following constructor T1
--
-- data T a where
-- T1 :: forall b. Int -> b -> T(a,b)
-- ...
--
-- has representation type
-- forall a. forall a1. forall b. (a ~ (a1,b)) =>
-- Int -> b -> T a
--
-- dataConInstPat fss us T1 (a1',b') will return
--
-- ([a1'', b''], [c :: (a1', b')~(a1'', b''), x :: Int, y :: b''])
--
-- where the double-primed variables are created with the FastStrings and
-- Uniques given as fss and us
dataConInstPat fss uniqs con inst_tys
= ASSERT( univ_tvs `equalLength` inst_tys )
(ex_bndrs, arg_ids)
where
univ_tvs = dataConUnivTyVars con
ex_tvs = dataConExTyVars con
arg_tys = dataConRepArgTys con
arg_strs = dataConRepStrictness con -- 1-1 with arg_tys
n_ex = length ex_tvs
-- split the Uniques and FastStrings
(ex_uniqs, id_uniqs) = splitAt n_ex uniqs
(ex_fss, id_fss) = splitAt n_ex fss
-- Make the instantiating substitution for universals
univ_subst = zipTvSubst univ_tvs inst_tys
    -- Make existential type variables, applying and extending the substitution
(full_subst, ex_bndrs) = mapAccumL mk_ex_var univ_subst
(zip3 ex_tvs ex_fss ex_uniqs)
mk_ex_var :: TCvSubst -> (TyVar, FastString, Unique) -> (TCvSubst, TyVar)
mk_ex_var subst (tv, fs, uniq) = (Type.extendTCvSubst subst tv
(mkTyVarTy new_tv)
, new_tv)
where
new_tv = mkTyVar (mkSysTvName uniq fs) kind
kind = Type.substTyUnchecked subst (tyVarKind tv)
-- Make value vars, instantiating types
arg_ids = zipWith4 mk_id_var id_uniqs id_fss arg_tys arg_strs
mk_id_var uniq fs ty str
= mkLocalIdOrCoVarWithInfo name (Type.substTyUnchecked full_subst ty) info
where
name = mkInternalName uniq (mkVarOccFS fs) noSrcSpan
info | isMarkedStrict str = vanillaIdInfo `setUnfoldingInfo` evaldUnfolding
| otherwise = vanillaIdInfo
-- See Note [Mark evaluated arguments]
{-
Note [Mark evaluated arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When pattern matching on a constructor with strict fields, the binder
can have an 'evaldUnfolding'. Moreover, it *should* have one, so that
when loading an interface file unfolding like:
data T = MkT !Int
f x = case x of { MkT y -> let v::Int# = case y of I# n -> n+1
in ... }
we don't want Lint to complain. The 'y' is evaluated, so the
case in the RHS of the binding for 'v' is fine. But only if we
*know* that 'y' is evaluated.
c.f. add_evals in Simplify.simplAlt
************************************************************************
* *
Equality
* *
************************************************************************
-}
-- | A cheap equality test which bales out fast!
-- If it returns @True@ the arguments are definitely equal,
-- otherwise, they may or may not be equal.
--
-- See also 'exprIsBig'
cheapEqExpr :: Expr b -> Expr b -> Bool
cheapEqExpr = cheapEqExpr' (const False)
-- | Cheap expression equality test, can ignore ticks by type.
cheapEqExpr' :: (Tickish Id -> Bool) -> Expr b -> Expr b -> Bool
cheapEqExpr' ignoreTick = go_s
where go_s = go `on` stripTicksTopE ignoreTick
go (Var v1) (Var v2) = v1 == v2
go (Lit lit1) (Lit lit2) = lit1 == lit2
go (Type t1) (Type t2) = t1 `eqType` t2
go (Coercion c1) (Coercion c2) = c1 `eqCoercion` c2
go (App f1 a1) (App f2 a2)
= f1 `go_s` f2 && a1 `go_s` a2
go (Cast e1 t1) (Cast e2 t2)
= e1 `go_s` e2 && t1 `eqCoercion` t2
go (Tick t1 e1) (Tick t2 e2)
= t1 == t2 && e1 `go_s` e2
go _ _ = False
{-# INLINE go #-}
{-# INLINE cheapEqExpr' #-}
exprIsBig :: Expr b -> Bool
-- ^ Returns @True@ of expressions that are too big to be compared by 'cheapEqExpr'
exprIsBig (Lit _) = False
exprIsBig (Var _) = False
exprIsBig (Type _) = False
exprIsBig (Coercion _) = False
exprIsBig (Lam _ e) = exprIsBig e
exprIsBig (App f a) = exprIsBig f || exprIsBig a
exprIsBig (Cast e _) = exprIsBig e -- Hopefully coercions are not too big!
exprIsBig (Tick _ e) = exprIsBig e
exprIsBig _ = True
eqExpr :: InScopeSet -> CoreExpr -> CoreExpr -> Bool
-- Compares for equality, modulo alpha
eqExpr in_scope e1 e2
= go (mkRnEnv2 in_scope) e1 e2
where
go env (Var v1) (Var v2)
| rnOccL env v1 == rnOccR env v2
= True
go _ (Lit lit1) (Lit lit2) = lit1 == lit2
go env (Type t1) (Type t2) = eqTypeX env t1 t2
go env (Coercion co1) (Coercion co2) = eqCoercionX env co1 co2
go env (Cast e1 co1) (Cast e2 co2) = eqCoercionX env co1 co2 && go env e1 e2
go env (App f1 a1) (App f2 a2) = go env f1 f2 && go env a1 a2
go env (Tick n1 e1) (Tick n2 e2) = eqTickish env n1 n2 && go env e1 e2
go env (Lam b1 e1) (Lam b2 e2)
= eqTypeX env (varType b1) (varType b2) -- False for Id/TyVar combination
&& go (rnBndr2 env b1 b2) e1 e2
go env (Let (NonRec v1 r1) e1) (Let (NonRec v2 r2) e2)
= go env r1 r2 -- No need to check binder types, since RHSs match
&& go (rnBndr2 env v1 v2) e1 e2
go env (Let (Rec ps1) e1) (Let (Rec ps2) e2)
= length ps1 == length ps2
&& all2 (go env') rs1 rs2 && go env' e1 e2
where
(bs1,rs1) = unzip ps1
(bs2,rs2) = unzip ps2
env' = rnBndrs2 env bs1 bs2
go env (Case e1 b1 t1 a1) (Case e2 b2 t2 a2)
| null a1 -- See Note [Empty case alternatives] in TrieMap
= null a2 && go env e1 e2 && eqTypeX env t1 t2
| otherwise
= go env e1 e2 && all2 (go_alt (rnBndr2 env b1 b2)) a1 a2
go _ _ _ = False
-----------
go_alt env (c1, bs1, e1) (c2, bs2, e2)
= c1 == c2 && go (rnBndrs2 env bs1 bs2) e1 e2
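-- Illustrative, self-contained sketch (hypothetical names, not GHC's
-- RnEnv2): alpha-equivalence on a toy lambda calculus, using a stack of
-- paired binders in the same spirit as the renaming environment threaded
-- through eqExpr above.  Two bound variables are equal iff they were
-- introduced by corresponding lambdas; two free variables are equal iff
-- they have the same name.
data NamedLam = NVar String | NApp NamedLam NamedLam | NLam String NamedLam
alphaEq :: NamedLam -> NamedLam -> Bool
alphaEq = go []
  where
    go env (NVar x) (NVar y) =
      case (depth ((== x) . fst) env, depth ((== y) . snd) env) of
        (Just i,  Just j)  -> i == j    -- both bound: by corresponding lambdas
        (Nothing, Nothing) -> x == y    -- both free: same name
        _                  -> False
    go env (NApp f1 a1) (NApp f2 a2) = go env f1 f2 && go env a1 a2
    go env (NLam b1 e1) (NLam b2 e2) = go ((b1, b2) : env) e1 e2
    go _ _ _ = False
    -- index of the first element satisfying the predicate, if any
    depth p = go' (0 :: Int)
      where go' _ []                 = Nothing
            go' i (e:es) | p e       = Just i
                         | otherwise = go' (i + 1) es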
eqTickish :: RnEnv2 -> Tickish Id -> Tickish Id -> Bool
eqTickish env (Breakpoint lid lids) (Breakpoint rid rids)
= lid == rid && map (rnOccL env) lids == map (rnOccR env) rids
eqTickish _ l r = l == r
-- | Finds differences between core expressions, modulo alpha and
-- renaming. Setting @top@ means that the @IdInfo@ of bindings will be
-- checked for differences as well.
diffExpr :: Bool -> RnEnv2 -> CoreExpr -> CoreExpr -> [SDoc]
diffExpr _ env (Var v1) (Var v2) | rnOccL env v1 == rnOccR env v2 = []
diffExpr _ _ (Lit lit1) (Lit lit2) | lit1 == lit2 = []
diffExpr _ env (Type t1) (Type t2) | eqTypeX env t1 t2 = []
diffExpr _ env (Coercion co1) (Coercion co2)
| eqCoercionX env co1 co2 = []
diffExpr top env (Cast e1 co1) (Cast e2 co2)
| eqCoercionX env co1 co2 = diffExpr top env e1 e2
diffExpr top env (Tick n1 e1) e2
| not (tickishIsCode n1) = diffExpr top env e1 e2
diffExpr top env e1 (Tick n2 e2)
| not (tickishIsCode n2) = diffExpr top env e1 e2
diffExpr top env (Tick n1 e1) (Tick n2 e2)
| eqTickish env n1 n2 = diffExpr top env e1 e2
-- The error message of failed pattern matches will contain
-- generated names, which are allowed to differ.
diffExpr _ _ (App (App (Var absent) _) _)
(App (App (Var absent2) _) _)
| isBottomingId absent && isBottomingId absent2 = []
diffExpr top env (App f1 a1) (App f2 a2)
= diffExpr top env f1 f2 ++ diffExpr top env a1 a2
diffExpr top env (Lam b1 e1) (Lam b2 e2)
| eqTypeX env (varType b1) (varType b2) -- False for Id/TyVar combination
= diffExpr top (rnBndr2 env b1 b2) e1 e2
diffExpr top env (Let bs1 e1) (Let bs2 e2)
= let (ds, env') = diffBinds top env (flattenBinds [bs1]) (flattenBinds [bs2])
in ds ++ diffExpr top env' e1 e2
diffExpr top env (Case e1 b1 t1 a1) (Case e2 b2 t2 a2)
| length a1 == length a2 && not (null a1) || eqTypeX env t1 t2
-- See Note [Empty case alternatives] in TrieMap
= diffExpr top env e1 e2 ++ concat (zipWith diffAlt a1 a2)
where env' = rnBndr2 env b1 b2
diffAlt (c1, bs1, e1) (c2, bs2, e2)
| c1 /= c2 = [text "alt-cons " <> ppr c1 <> text " /= " <> ppr c2]
| otherwise = diffExpr top (rnBndrs2 env' bs1 bs2) e1 e2
diffExpr _ _ e1 e2
= [fsep [ppr e1, text "/=", ppr e2]]
-- | Finds differences between core bindings, see @diffExpr@.
--
-- The main problem here is that while we expect the binds to have the
-- same order in both lists, this is not guaranteed. To do this
-- properly we'd either have to do some sort of unification or check
-- all possible mappings, which would be seriously expensive. So
-- instead we simply match single bindings as far as we can. This
-- leaves us just with mutually recursive and/or mismatching bindings,
-- which we then speculatively match by ordering them. It's by no means
-- perfect, but gets the job done well enough.
diffBinds :: Bool -> RnEnv2 -> [(Var, CoreExpr)] -> [(Var, CoreExpr)]
-> ([SDoc], RnEnv2)
diffBinds top env binds1 = go (length binds1) env binds1
where go _ env [] []
= ([], env)
go fuel env binds1 binds2
-- No binds left to compare? Bail out early.
| null binds1 || null binds2
= (warn env binds1 binds2, env)
-- Iterated over all binds without finding a match? Then
-- try speculatively matching binders by order.
| fuel == 0
= if not $ env `inRnEnvL` fst (head binds1)
then let env' = uncurry (rnBndrs2 env) $ unzip $
zip (sort $ map fst binds1) (sort $ map fst binds2)
in go (length binds1) env' binds1 binds2
-- If we have already tried that, give up
else (warn env binds1 binds2, env)
go fuel env ((bndr1,expr1):binds1) binds2
| let matchExpr (bndr,expr) =
(not top || null (diffIdInfo env bndr bndr1)) &&
null (diffExpr top (rnBndr2 env bndr1 bndr) expr1 expr)
, (binds2l, (bndr2,_):binds2r) <- break matchExpr binds2
= go (length binds1) (rnBndr2 env bndr1 bndr2)
binds1 (binds2l ++ binds2r)
| otherwise -- No match, so push back (FIXME O(n^2))
= go (fuel-1) env (binds1++[(bndr1,expr1)]) binds2
go _ _ _ _ = panic "diffBinds: impossible" -- GHC isn't smart enough
-- We have tried everything, but couldn't find a good match. So
-- now we just return the comparison results when we pair up
-- the binds in a pseudo-random order.
warn env binds1 binds2 =
concatMap (uncurry (diffBind env)) (zip binds1' binds2') ++
unmatched "unmatched left-hand:" (drop l binds1') ++
unmatched "unmatched right-hand:" (drop l binds2')
where binds1' = sortBy (comparing fst) binds1
binds2' = sortBy (comparing fst) binds2
l = min (length binds1') (length binds2')
unmatched _ [] = []
unmatched txt bs = [text txt $$ ppr (Rec bs)]
diffBind env (bndr1,expr1) (bndr2,expr2)
| ds@(_:_) <- diffExpr top env expr1 expr2
= locBind "in binding" bndr1 bndr2 ds
| otherwise
= diffIdInfo env bndr1 bndr2
-- | Find differences in @IdInfo@. We will especially check whether
-- the unfoldings match, if present (see @diffUnfold@).
diffIdInfo :: RnEnv2 -> Var -> Var -> [SDoc]
diffIdInfo env bndr1 bndr2
| arityInfo info1 == arityInfo info2
&& cafInfo info1 == cafInfo info2
&& oneShotInfo info1 == oneShotInfo info2
&& inlinePragInfo info1 == inlinePragInfo info2
&& occInfo info1 == occInfo info2
&& demandInfo info1 == demandInfo info2
&& callArityInfo info1 == callArityInfo info2
= locBind "in unfolding of" bndr1 bndr2 $
diffUnfold env (unfoldingInfo info1) (unfoldingInfo info2)
| otherwise
= locBind "in Id info of" bndr1 bndr2
[fsep [pprBndr LetBind bndr1, text "/=", pprBndr LetBind bndr2]]
where info1 = idInfo bndr1; info2 = idInfo bndr2
-- | Find differences in unfoldings. Note that we will not check for
-- differences of @IdInfo@ in unfoldings, as this is generally
-- redundant, and can lead to an exponential blow-up in complexity.
diffUnfold :: RnEnv2 -> Unfolding -> Unfolding -> [SDoc]
diffUnfold _ NoUnfolding NoUnfolding = []
diffUnfold _ (OtherCon cs1) (OtherCon cs2) | cs1 == cs2 = []
diffUnfold env (DFunUnfolding bs1 c1 a1)
(DFunUnfolding bs2 c2 a2)
| c1 == c2 && length bs1 == length bs2
= concatMap (uncurry (diffExpr False env')) (zip a1 a2)
where env' = rnBndrs2 env bs1 bs2
diffUnfold env (CoreUnfolding t1 _ _ v1 cl1 wf1 x1 g1)
(CoreUnfolding t2 _ _ v2 cl2 wf2 x2 g2)
| v1 == v2 && cl1 == cl2
&& wf1 == wf2 && x1 == x2 && g1 == g2
= diffExpr False env t1 t2
diffUnfold _ uf1 uf2
= [fsep [ppr uf1, text "/=", ppr uf2]]
-- | Add location information to diff messages
locBind :: String -> Var -> Var -> [SDoc] -> [SDoc]
locBind loc b1 b2 diffs = map addLoc diffs
where addLoc d = d $$ nest 2 (parens (text loc <+> bindLoc))
bindLoc | b1 == b2 = ppr b1
| otherwise = ppr b1 <> char '/' <> ppr b2
{-
************************************************************************
* *
Eta reduction
* *
************************************************************************
Note [Eta reduction conditions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We try for eta reduction here, but *only* if we get all the way to an
trivial expression. We don't want to remove extra lambdas unless we
are going to avoid allocating this thing altogether.
There are some particularly delicate points here:
* We want to eta-reduce if doing so leaves a trivial expression,
*including* a cast. For example
\x. f |> co --> f |> co
(provided co doesn't mention x)
* Eta reduction is not valid in general:
\x. bot /= bot
This matters, partly for old-fashioned correctness reasons but,
worse, getting it wrong can yield a seg fault. Consider
f = \x.f x
h y = case (case y of { True -> f `seq` True; False -> False }) of
True -> ...; False -> ...
If we (unsoundly) eta-reduce f to get f=f, the strictness analyser
says f=bottom, and replaces the (f `seq` True) with just
  (f `cast` unsafe-co).  BUT, as things stand, 'f' got arity 1, and it
*keeps* arity 1 (perhaps also wrongly). So CorePrep eta-expands
  the definition again, so that it does not terminate after all.
Result: seg-fault because the boolean case actually gets a function value.
See Trac #1947.
So it's important to do the right thing.
* Note [Arity care]: we need to be careful if we just look at f's
arity. Currently (Dec07), f's arity is visible in its own RHS (see
Note [Arity robustness] in SimplEnv) so we must *not* trust the
arity when checking that 'f' is a value. Otherwise we will
eta-reduce
f = \x. f x
to
f = f
Which might change a terminating program (think (f `seq` e)) to a
non-terminating one. So we check for being a loop breaker first.
However for GlobalIds we can look at the arity; and for primops we
must, since they have no unfolding.
* Regardless of whether 'f' is a value, we always want to
reduce (/\a -> f a) to f
This came up in a RULE: foldr (build (/\a -> g a))
did not match foldr (build (/\b -> ...something complex...))
The type checker can insert these eta-expanded versions,
with both type and dictionary lambdas; hence the slightly
ad-hoc isDictId
* Never *reduce* arity. For example
f = \xy. g x y
  Then if g has arity 1 we don't want to eta-reduce because then
f's arity would decrease, and that is bad
These delicacies are why we don't use exprIsTrivial and exprIsHNF here.
Alas.
Note [Eta reduction with casted arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
(\(x:t3). f (x |> g)) :: t3 -> t2
where
f :: t1 -> t2
g :: t3 ~ t1
This should be eta-reduced to
f |> (sym g -> t2)
So we need to accumulate a coercion, pushing it inward (past
variable arguments only) thus:
f (x |> co_arg) |> co --> (f |> (sym co_arg -> co)) x
f (x:t) |> co --> (f |> (t -> co)) x
f @ a |> co --> (f |> (forall a.co)) @ a
f @ (g:t1~t2) |> co --> (f |> (t1~t2 => co)) @ (g:t1~t2)
These are the equations for ok_arg.
It's true that we could also hope to eta reduce these:
(\xy. (f x |> g) y)
(\xy. (f x y) |> g)
But the simplifier pushes those casts outwards, so we don't
need to address that here.
-}
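-- Before the real thing, an illustrative, self-contained sketch
-- (hypothetical names, not part of this module) of the core of eta
-- reduction on a toy lambda calculus: \x -> f x reduces to f when x does
-- not occur free in f.  The real tryEtaReduce below also has to respect
-- the arity, bottoming and coercion conditions discussed in the Notes
-- above.
data MiniExpr = MVar String | MApp MiniExpr MiniExpr | MLam String MiniExpr
miniFreeVars :: MiniExpr -> [String]
miniFreeVars (MVar v)   = [v]
miniFreeVars (MApp f a) = miniFreeVars f ++ miniFreeVars a
miniFreeVars (MLam b e) = filter (/= b) (miniFreeVars e)
miniEtaReduce :: MiniExpr -> MiniExpr
miniEtaReduce (MLam x (MApp f (MVar y)))
  | x == y, x `notElem` miniFreeVars f = f
miniEtaReduce e = e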
tryEtaReduce :: [Var] -> CoreExpr -> Maybe CoreExpr
tryEtaReduce bndrs body
= go (reverse bndrs) body (mkRepReflCo (exprType body))
where
incoming_arity = count isId bndrs
go :: [Var] -- Binders, innermost first, types [a3,a2,a1]
-> CoreExpr -- Of type tr
-> Coercion -- Of type tr ~ ts
-> Maybe CoreExpr -- Of type a1 -> a2 -> a3 -> ts
-- See Note [Eta reduction with casted arguments]
-- for why we have an accumulating coercion
go [] fun co
| ok_fun fun
, let used_vars = exprFreeVars fun `unionVarSet` tyCoVarsOfCo co
, not (any (`elemVarSet` used_vars) bndrs)
= Just (mkCast fun co) -- Check for any of the binders free in the result
-- including the accumulated coercion
go bs (Tick t e) co
| tickishFloatable t
= fmap (Tick t) $ go bs e co
-- Float app ticks: \x -> Tick t (e x) ==> Tick t e
go (b : bs) (App fun arg) co
| Just (co', ticks) <- ok_arg b arg co
= fmap (flip (foldr mkTick) ticks) $ go bs fun co'
-- Float arg ticks: \x -> e (Tick t x) ==> Tick t e
go _ _ _ = Nothing -- Failure!
---------------
-- Note [Eta reduction conditions]
ok_fun (App fun (Type {})) = ok_fun fun
ok_fun (Cast fun _) = ok_fun fun
ok_fun (Tick _ expr) = ok_fun expr
ok_fun (Var fun_id) = ok_fun_id fun_id || all ok_lam bndrs
ok_fun _fun = False
---------------
ok_fun_id fun = fun_arity fun >= incoming_arity
---------------
fun_arity fun -- See Note [Arity care]
| isLocalId fun
, isStrongLoopBreaker (idOccInfo fun) = 0
| arity > 0 = arity
| isEvaldUnfolding (idUnfolding fun) = 1
-- See Note [Eta reduction of an eval'd function]
| otherwise = 0
where
arity = idArity fun
---------------
ok_lam v = isTyVar v || isEvVar v
---------------
ok_arg :: Var -- Of type bndr_t
-> CoreExpr -- Of type arg_t
-> Coercion -- Of kind (t1~t2)
-> Maybe (Coercion -- Of type (arg_t -> t1 ~ bndr_t -> t2)
-- (and similarly for tyvars, coercion args)
, [Tickish Var])
-- See Note [Eta reduction with casted arguments]
ok_arg bndr (Type ty) co
| Just tv <- getTyVar_maybe ty
, bndr == tv = Just (mkHomoForAllCos [tv] co, [])
ok_arg bndr (Var v) co
| bndr == v = let reflCo = mkRepReflCo (idType bndr)
in Just (mkFunCo Representational reflCo co, [])
ok_arg bndr (Cast e co_arg) co
| (ticks, Var v) <- stripTicksTop tickishFloatable e
, bndr == v
= Just (mkFunCo Representational (mkSymCo co_arg) co, ticks)
-- The simplifier combines multiple casts into one,
-- so we can have a simple-minded pattern match here
ok_arg bndr (Tick t arg) co
| tickishFloatable t, Just (co', ticks) <- ok_arg bndr arg co
= Just (co', t:ticks)
ok_arg _ _ _ = Nothing
{-
Note [Eta reduction of an eval'd function]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In Haskell it is not true that    f = \x. f x
because f might be bottom, and 'seq' can distinguish them.
But it *is* true that f = f `seq` \x. f x
and we'd like to simplify the latter to the former. This amounts
to the rule that
* when there is just *one* value argument,
* f is not bottom
we can eta-reduce \x. f x ===> f
This turned up in Trac #7542.
************************************************************************
* *
        Determining non-updatable right-hand-sides
* *
************************************************************************
Top-level constructor applications can usually be allocated
statically, but they can't if the constructor, or any of the
arguments, come from another DLL (because we can't refer to static
labels in other DLLs).
If this happens we simply make the RHS into an updatable thunk,
and 'execute' it rather than allocating it statically.
-}
-- | This function is called only on *top-level* right-hand sides.
-- Returns @True@ if the RHS can be allocated statically in the output,
-- with no thunks involved at all.
rhsIsStatic :: Platform
-> (Name -> Bool) -- Which names are dynamic
-> (Integer -> CoreExpr) -- Desugaring for integer literals (disgusting)
-- C.f. Note [Disgusting computation of CafRefs]
-- in TidyPgm
-> CoreExpr -> Bool
-- It's called (i) in TidyPgm.hasCafRefs to decide if the rhs is, or
-- refers to, CAFs; (ii) in CoreToStg to decide whether to put an
-- update flag on it and (iii) in DsExpr to decide how to expand
-- list literals
--
-- The basic idea is that rhsIsStatic returns True only if the RHS is
-- (a) a value lambda
-- (b) a saturated constructor application with static args
--
-- BUT watch out for
-- (i) Any cross-DLL references kill static-ness completely
-- because they must be 'executed' not statically allocated
--     ("DLL" here really only refers to Windows DLLs; on other platforms
-- this is not necessary)
--
-- (ii) We treat partial applications as redexes, because in fact we
-- make a thunk for them that runs and builds a PAP
--      at run-time.  The only applications that are treated as
-- static are *saturated* applications of constructors.
-- We used to try to be clever with nested structures like this:
-- ys = (:) w ((:) w [])
-- on the grounds that CorePrep will flatten ANF-ise it later.
-- But supporting this special case made the function much more
-- complicated, because the special case only applies if there are no
-- enclosing type lambdas:
-- ys = /\ a -> Foo (Baz ([] a))
-- Here the nested (Baz []) won't float out to top level in CorePrep.
--
-- But in fact, even without -O, nested structures at top level are
-- flattened by the simplifier, so we don't need to be super-clever here.
--
-- Examples
--
-- f = \x::Int. x+7 TRUE
-- p = (True,False) TRUE
--
-- d = (fst p, False) FALSE because there's a redex inside
-- (this particular one doesn't happen but...)
--
-- h = D# (1.0## /## 2.0##) FALSE (redex again)
-- n = /\a. Nil a TRUE
--
-- t = /\a. (:) (case w a of ...) (Nil a) FALSE (redex)
--
--
-- This is a bit like CoreUtils.exprIsHNF, with the following differences:
-- a) scc "foo" (\x -> ...) is updatable (so we catch the right SCC)
--
-- b) (C x xs), where C is a constructor, is updatable if the application is
-- dynamic
--
-- c) don't look through unfolding of f in (f x).
rhsIsStatic platform is_dynamic_name cvt_integer rhs = is_static False rhs
where
is_static :: Bool -- True <=> in a constructor argument; must be atomic
-> CoreExpr -> Bool
is_static False (Lam b e) = isRuntimeVar b || is_static False e
is_static in_arg (Tick n e) = not (tickishIsCode n)
&& is_static in_arg e
is_static in_arg (Cast e _) = is_static in_arg e
is_static _ (Coercion {}) = True -- Behaves just like a literal
is_static in_arg (Lit (LitInteger i _)) = is_static in_arg (cvt_integer i)
is_static _ (Lit (MachLabel {})) = False
is_static _ (Lit _) = True
-- A MachLabel (foreign import "&foo") in an argument
-- prevents a constructor application from being static. The
-- reason is that it might give rise to unresolvable symbols
-- in the object file: under Linux, references to "weak"
-- symbols from the data segment give rise to "unresolvable
  -- relocation" errors at link time.  This might be due to a bug
-- in the linker, but we'll work around it here anyway.
-- SDM 24/2/2004
is_static in_arg other_expr = go other_expr 0
where
go (Var f) n_val_args
| (platformOS platform /= OSMinGW32) ||
not (is_dynamic_name (idName f))
= saturated_data_con f n_val_args
|| (in_arg && n_val_args == 0)
-- A naked un-applied variable is *not* deemed a static RHS
-- E.g. f = g
-- Reason: better to update so that the indirection gets shorted
-- out, and the true value will be seen
-- NB: if you change this, you'll break the invariant that THUNK_STATICs
-- are always updatable. If you do so, make sure that non-updatable
-- ones have enough space for their static link field!
go (App f a) n_val_args
| isTypeArg a = go f n_val_args
| not in_arg && is_static True a = go f (n_val_args + 1)
-- The (not in_arg) checks that we aren't in a constructor argument;
-- if we are, we don't allow (value) applications of any sort
--
-- NB. In case you wonder, args are sometimes not atomic. eg.
-- x = D# (1.0## /## 2.0##)
-- can't float because /## can fail.
go (Tick n f) n_val_args = not (tickishIsCode n) && go f n_val_args
go (Cast e _) n_val_args = go e n_val_args
go _ _ = False
saturated_data_con f n_val_args
= case isDataConWorkId_maybe f of
Just dc -> n_val_args == dataConRepArity dc
Nothing -> False
{-
************************************************************************
* *
        Type utilities
* *
************************************************************************
-}
-- | True if the type has no non-bottom elements, e.g. when it is an empty
-- datatype, or a GADT with non-satisfiable type parameters, e.g. Int :~: Bool.
-- See Note [Bottoming expressions]
--
-- See Note [No alternatives lint check] for another use of this function.
isEmptyTy :: Type -> Bool
isEmptyTy ty
-- Data types where, given the particular type parameters, no data
-- constructor matches, are empty.
-- This includes data types with no constructors, e.g. Data.Void.Void.
| Just (tc, inst_tys) <- splitTyConApp_maybe ty
, Just dcs <- tyConDataCons_maybe tc
, all (dataConCannotMatch inst_tys) dcs
= True
| otherwise
= False
|
nushio3/ghc
|
compiler/coreSyn/CoreUtils.hs
|
Haskell
|
bsd-3-clause
| 88,132
|
module Main where
import Data.Lens.Common ((^.), (^=))
import Prelude hiding (Either(..))
import System.Console.ANSI
import System.IO
import Console
import Level
import Types
-- operator to add 2 coordinates together
(|+|) :: Coord -> Coord -> Coord
(|+|) (x1, y1) (x2, y2) = (x1 + x2, y1 + y2)
-- receive a character and return our Input data structure,
-- recursing on invalid input
getInput :: IO Input
getInput = do
char <- getChar
case char of
'q' -> return Exit
'w' -> return (Dir Up)
's' -> return (Dir Down)
'a' -> return (Dir Left)
'd' -> return (Dir Right)
_ -> getInput
-- translate a direction to a coordinate so it can be added to
-- the hero's coordinate to move the hero around
dirToCoord :: Direction -> Coord
dirToCoord Up = (0, -1)
dirToCoord Down = (0, 1)
dirToCoord Left = (-1, 0)
dirToCoord Right = (1, 0)
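-- illustrative helper (hypothetical name, shown only as a usage sketch of
-- the two functions above): move a coordinate one step in a direction
exampleStep :: Coord -> Direction -> Coord
exampleStep pos dir = pos |+| dirToCoord dir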
-- add the supplied direction to the hero's position,
-- and set that to be the hero's new position, making
-- sure to limit it between 0 and 80 in either direction
handleDir :: World -> Direction -> IO ()
handleDir w dir
| isWall coord lvl ||
isClosedDoor coord lvl = gameLoop ((^=) posL (w ^. posL) w)
| otherwise = gameLoop ((^=) posL coord w)
where
h = wHero w
lvl = wLevel w
coord = (newX, newY)
newX = hConst heroX
newY = hConst heroY
(heroX, heroY) = hCurrPos h |+| dirToCoord dir
hConst i = max 0 (min i 80)
-- when the user wants to exit we give them a thank you
-- message and then reshow the cursor
handleExit :: IO ()
handleExit = do
clearScreen
setCursorPosition 0 0
showCursor
setSGR [Reset]
putStrLn "Thank you for playing!"
-- draw the hero, process input, and either recur or exit
gameLoop :: World -> IO ()
gameLoop world = do
drawHero world
input <- getInput
case input of
Exit -> handleExit
Dir dir -> handleDir world dir
main :: IO ()
main = do
hSetEcho stdin False
hSetBuffering stdin NoBuffering
hSetBuffering stdout NoBuffering
hideCursor
setTitle "Thieflike"
clearScreen
let world = genesis { wLevel = level1, wLevels = [level1] }
drawWorld world
gameLoop world
|
jamiltron/Thieflike
|
src/Main.hs
|
Haskell
|
bsd-3-clause
| 2,227
|
{-# LANGUAGE EmptyDataDecls, TypeSynonymInstances #-}
{-# OPTIONS_GHC -fcontext-stack47 #-}
module Games.Chaos2010.Database.Spells_with_order where
import Games.Chaos2010.Database.Fields
import Database.HaskellDB.DBLayout
type Spells_with_order =
Record
(HCons (LVPair Spell_category (Expr (Maybe String)))
(HCons (LVPair Spell_name (Expr (Maybe String)))
(HCons (LVPair Base_chance (Expr (Maybe Int)))
(HCons (LVPair Alignment (Expr (Maybe Int)))
(HCons (LVPair Description (Expr (Maybe String)))
(HCons (LVPair Section_order (Expr (Maybe Int)))
(HCons (LVPair Alignment_order (Expr (Maybe Int))) HNil)))))))
spells_with_order :: Table Spells_with_order
spells_with_order = baseTable "spells_with_order"
|
JakeWheat/Chaos-2010
|
Games/Chaos2010/Database/Spells_with_order.hs
|
Haskell
|
bsd-3-clause
| 826
|
module Euler.E2
( fib
, every
)
where
-- | Infinite Fibonacci sequence 1, 2, 3, 5, 8, ..., built with 'scanl'.
fib :: [Int]
fib = scanl (+) 1 (1:fib)
-- | Keep every n-th element of a list, starting with the first one.
every :: Int -> [a] -> [a]
every _ [] = []
every n (x:xs) = x : every n (drop (n-1) xs)
|
lslah/euler
|
src/Euler/E2.hs
|
Haskell
|
bsd-3-clause
| 184
|
module Language.GDL.Unify
( Substitution
, unify
) where
import qualified Data.Map as M
import Language.GDL.Syntax
type Substitution = M.Map Identifier Term
-- | Occurs check: does the identifier appear anywhere in the term?
occurs :: Identifier -> Term -> Bool
occurs _ (Atom _) = False
occurs ident (Var identr) = ident == identr
occurs ident (Compound children) = any (occurs ident) children
occurs _ _ = False
-- | Extend the substitution with a binding for the identifier, unifying with
-- any existing binding and rejecting bindings that fail the occurs check.
extend :: Substitution -> Identifier -> Term -> Maybe Substitution
extend sub ident value = case M.lookup ident sub of
Just struct -> unify sub struct value
Nothing -> extvar value
where extvar (Var identr) = case M.lookup identr sub of
Just struct -> unify sub (Var ident) struct
Nothing -> if ident == identr then Just sub else Just extsub
extvar struct = if occurs ident struct then Nothing else Just extsub
extsub = M.insert ident value sub
-- | Unify two terms under an existing substitution, returning the extended
-- substitution on success.
unify :: Substitution -> Term -> Term -> Maybe Substitution
unify sub (Atom x) (Atom y)
| x == y = Just sub
| otherwise = Nothing
unify sub (Var ident) right = extend sub ident right
unify sub left (Var ident) = extend sub ident left
unify sub (Compound []) (Compound []) = Just sub
unify sub (Compound (x:xs)) (Compound (y:ys)) = case unify sub x y of
Just sub' -> unify sub' (Compound xs) (Compound ys)
Nothing -> Nothing
unify _ _ _ = Nothing
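-- A hedged usage sketch (editorial note, not part of the original module; it
-- assumes 'Identifier' and the payload of 'Atom' are string-like):
--   unify M.empty (Compound [Atom "f", Var "x"]) (Compound [Atom "f", Atom "a"])
--     == Just (M.fromList [("x", Atom "a")])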
|
ian-ross/ggp
|
Language/GDL/Unify.hs
|
Haskell
|
bsd-3-clause
| 1,333
|
module WASH.CGI.AbstractSelector
-- the public interface
-- ( as_rows, as_cols, table_io, getText, selectionGroup, selectionButton, selectionDisplay)
where
import WASH.CGI.BaseCombinators (unsafe_io, once)
import WASH.CGI.CGIInternals (HTMLField, INVALID, ValidationError (..))
import WASH.CGI.CGIMonad hiding (lift)
import WASH.CGI.HTMLWrapper
import WASH.CGI.RawCGIInternal hiding (CGIEnv (..))
import WASH.Utility.JavaScript
import Data.Char (isSpace)
import Data.List ((\\))
import Data.Maybe (isJust, fromMaybe)
-- |abstract table (two-dimensional)
data AT =
AT { as_raw :: [[String]]
, as_rows :: Int
, as_cols :: Int
}
instance Show AT where
showsPrec i as = showsPrec i (as_rows as, as_cols as)
instance Read AT where
readsPrec i inp =
[ (AT { as_raw = [], as_rows = r, as_cols = c }, str')
| ((r,c), str') <- readsPrec i inp
]
-- |abstract row
data AR = AR [String]
deriving (Eq, Show)
instance Read AR where
readsPrec i inp =
case dropWhile isSpace inp of
'A':'R':xs ->
[(AR xss, rest) | (xss, rest) <- reads xs]
_ -> []
readList inp =
case dropWhile isSpace inp of
'+':xs ->
[ (ar:ars, xs2)| (ar, xs1) <- reads xs, (ars, xs2) <- readList xs1 ]
'-':xs ->
[ (ars\\[ar], xs2)| (ar, xs1) <- reads xs, (ars, xs2) <- readList xs1 ]
"" ->
[([],[])]
_ -> []
getAR :: AT -> Int -> AR
getAR at r =
AR (getRow (as_raw at) r)
unAR :: AR -> [String]
unAR (AR x) = x
-- |Transform an IO action that produces a table in list form into a CGI action
-- that returns an abstract table.
table_io :: IO [[String]] -> CGI AT
table_io io =
once $
do raw <- unsafe_io io
let r = length raw
c = length (Prelude.head raw)
return (AT { as_raw = raw
, as_rows = r
, as_cols = c
})
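-- A hedged usage sketch (editorial note): lifting a constant table into the
-- CGI monad with the signature above.
--
-- > exampleTable :: CGI AT
-- > exampleTable = table_io (return [["id", "name"], ["1", "alice"]])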
-- |Access abstract table by row and column. Produces a text node in the
-- document monad.
getText :: Monad m => AT -> Int -> Int -> WithHTML x m ()
getText as r c =
text (getEntry (as_raw as) r c)
getRow xss r
| 0 <= r && r < length xss = xss !! r
| otherwise = []
getCol xs c
| 0 <= c && c < length xs = xs !! c
| otherwise = ""
getEntry xss r c =
getCol (getRow xss r) c
-- |a selection group is a virtual field that never appears on the screen, but
-- gives rise to a hidden input field!
data SelectionGroup a x =
SelectionGroup { selectionName :: String
, selectionToken :: CGIFieldName
, selectionString :: Maybe String
, selectionValue :: Maybe a
, selectionBound :: Bool
}
validateSelectionGroup rg =
case selectionValue rg of
Nothing | selectionBound rg ->
Left [ValidationError (selectionName rg) (selectionToken rg) (selectionString rg)]
_ ->
Right SelectionGroup { selectionName = selectionName rg
, selectionToken = selectionToken rg
, selectionString = selectionString rg
, selectionValue = selectionValue rg
, selectionBound = selectionBound rg
}
valueSelectionGroup rg =
case selectionValue rg of
Nothing -> error ("SelectionGroup { " ++
"selectionName = " ++ show (selectionName rg) ++ ", " ++
"selectionString = " ++ show (selectionString rg) ++ ", " ++
"selectionBound = " ++ show (selectionBound rg) ++
" }")
Just vl -> vl
-- |Create a selection group for a table. Selects one row.
selectionGroup :: (CGIMonad cgi) => WithHTML y cgi (SelectionGroup AR INVALID)
selectionGroup =
do token <- lift nextName
let fieldName = show token
info <- lift getInfo
lift $ addField fieldName False
let bds = bindings info
maybeString = bds >>= assocParm fieldName
-- experimental
isBound = fromMaybe False (do "UNSET" <- maybeString
return True)
maybeVal = maybeString >>= (g . reads)
g ((a,""):_) = Just a
g _ = Nothing
input (do attr "type" "hidden"
attr "name" fieldName
attr "value" "UNSET")
return $
SelectionGroup { selectionName = fieldName
, selectionToken = token
, selectionString = maybeString
, selectionValue = maybeVal
, selectionBound = isBound
}
-- |Create a selection button for an abstract table
selectionButton :: (CGIMonad cgi) =>
SelectionGroup AR INVALID -> AT -> Int -> HTMLField cgi x y ()
selectionButton sg at row buttonAttrs =
input (do attr "type" "radio"
attr "name" (fieldName++"_")
attr "onclick" ("var ff=this.form."++fieldName++
";ff.value=" ++ jsShow (show (getAR at row))++
";if(ff.getAttribute('onchange'))"++
"{WASHSubmit(ff.name);"++
"};")
buttonAttrs)
where
fieldName = selectionName sg
-- |Create a labelled selection display for an abstract table. The display
-- function takes the button element and a list of text nodes corresponding to
-- the selected row and is expected to perform the layout.
selectionDisplay :: (CGIMonad cgi) =>
SelectionGroup AR INVALID -> AT -> Int ->
(WithHTML x cgi () -> [WithHTML x cgi ()] -> WithHTML x cgi a) ->
WithHTML x cgi a
selectionDisplay sg at row displayFun =
displayFun (selectionButton sg at row empty)
(Prelude.map text $ getRow (as_raw at) row)
-- |Create a choice group for a table (0-*).
choiceGroup :: (CGIMonad cgi) => WithHTML x cgi (SelectionGroup [AR] INVALID)
choiceGroup =
do token <- lift nextName
let fieldName = show token
info <- lift getInfo
lift $ addField fieldName False
let bds = bindings info
maybeString = bds >>= assocParm fieldName
maybeVal = maybeString >>= (g . reads)
g ((a,""):_) = Just a
g _ = Nothing
input (do attr "type" "hidden"
attr "name" fieldName
attr "value" "")
return $
SelectionGroup { selectionName = fieldName
, selectionToken = token
, selectionString = maybeString
, selectionValue = maybeVal
, selectionBound = isJust bds
}
-- |Create one choice button for an abstract table
choiceButton :: (CGIMonad cgi) =>
SelectionGroup [AR] INVALID -> AT -> Int -> HTMLField cgi x y ()
choiceButton sg at row buttonAttrs =
do script_T (rawtext $
"SubmitAction[SubmitAction.length]=" ++
"function(){"++
"var f=document.forms[0];" ++
"if(f."++buttonFieldName++".checked){" ++
"f."++fieldName++".value=" ++ jsShow ('+':show (getAR at row)) ++
"+f."++fieldName++".value;" ++
"};return true};")
input_T
(do attr "type" "checkbox"
attr "name" buttonFieldName
buttonAttrs)
where
fieldName = selectionName sg
buttonFieldName = fieldName++'_':show row
-- |Create a labelled choice display for an abstract table. The display
-- function takes the button element and a list of text nodes corresponding to
-- the selected row and is expected to perform the layout.
choiceDisplay :: (CGIMonad cgi) =>
SelectionGroup [AR] INVALID -> AT -> Int ->
(WithHTML x cgi () -> [WithHTML x cgi ()] -> WithHTML x cgi a) ->
WithHTML x cgi a
choiceDisplay sg at row displayFun =
displayFun (choiceButton sg at row empty)
(Prelude.map text $ getRow (as_raw at) row)
|
nh2/WashNGo
|
WASH/CGI/AbstractSelector.hs
|
Haskell
|
bsd-3-clause
| 7,097
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE Rank2Types #-}
-----------------------------------------------------------------------------
-- |
-- Module : Data.Bits.Lens
-- Copyright : (C) 2012-14 Edward Kmett
-- License : BSD-style (see the file LICENSE)
-- Maintainer : Edward Kmett <ekmett@gmail.com>
-- Stability : experimental
-- Portability : LiberalTypeSynonyms
--
----------------------------------------------------------------------------
module Data.Bits.Lens
( (.|.~), (.&.~), (<.|.~), (<.&.~), (<<.|.~), (<<.&.~)
, (.|.=), (.&.=), (<.|.=), (<.&.=), (<<.|.=), (<<.&.=)
, bitAt
, bits
, byteAt
) where
import Control.Lens
import Control.Monad.State
import Data.Bits
import Data.Functor
import Data.Word
-- $setup
-- >>> :set -XNoOverloadedStrings
-- >>> import Data.Word
infixr 4 .|.~, .&.~, <.|.~, <.&.~, <<.|.~, <<.&.~
infix 4 .|.=, .&.=, <.|.=, <.&.=, <<.|.=, <<.&.=
-- | Bitwise '.|.' the target(s) of a 'Lens' or 'Setter'.
--
-- >>> _2 .|.~ 6 $ ("hello",3)
-- ("hello",7)
--
-- @
-- ('.|.~') :: 'Bits' a => 'Setter' s t a a -> a -> s -> t
-- ('.|.~') :: 'Bits' a => 'Iso' s t a a -> a -> s -> t
-- ('.|.~') :: 'Bits' a => 'Lens' s t a a -> a -> s -> t
-- ('.|.~') :: ('Data.Monoid.Monoid' a, 'Bits' a) => 'Traversal' s t a a -> a -> s -> t
-- @
(.|.~) :: Bits a => ASetter s t a a -> a -> s -> t
l .|.~ n = over l (.|. n)
{-# INLINE (.|.~) #-}
-- | Bitwise '.&.' the target(s) of a 'Lens' or 'Setter'.
--
-- >>> _2 .&.~ 7 $ ("hello",254)
-- ("hello",6)
--
-- @
-- ('.&.~') :: 'Bits' a => 'Setter' s t a a -> a -> s -> t
-- ('.&.~') :: 'Bits' a => 'Iso' s t a a -> a -> s -> t
-- ('.&.~') :: 'Bits' a => 'Lens' s t a a -> a -> s -> t
-- ('.&.~') :: ('Data.Monoid.Monoid' a, 'Bits' a) => 'Traversal' s t a a -> a -> s -> t
-- @
(.&.~) :: Bits a => ASetter s t a a -> a -> s -> t
l .&.~ n = over l (.&. n)
{-# INLINE (.&.~) #-}
-- | Modify the target(s) of a 'Lens'', 'Setter'' or 'Traversal'' by computing its bitwise '.&.' with another value.
--
-- >>> execState (do _1 .&.= 15; _2 .&.= 3) (7,7)
-- (7,3)
--
-- @
-- ('.&.=') :: ('MonadState' s m, 'Bits' a) => 'Setter'' s a -> a -> m ()
-- ('.&.=') :: ('MonadState' s m, 'Bits' a) => 'Iso'' s a -> a -> m ()
-- ('.&.=') :: ('MonadState' s m, 'Bits' a) => 'Lens'' s a -> a -> m ()
-- ('.&.=') :: ('MonadState' s m, 'Bits' a) => 'Traversal'' s a -> a -> m ()
-- @
(.&.=) :: (MonadState s m, Bits a) => ASetter' s a -> a -> m ()
l .&.= a = modify (l .&.~ a)
{-# INLINE (.&.=) #-}
-- | Modify the target(s) of a 'Lens'', 'Setter' or 'Traversal' by computing its bitwise '.|.' with another value.
--
-- >>> execState (do _1 .|.= 15; _2 .|.= 3) (7,7)
-- (15,7)
--
-- @
-- ('.|.=') :: ('MonadState' s m, 'Bits' a) => 'Setter'' s a -> a -> m ()
-- ('.|.=') :: ('MonadState' s m, 'Bits' a) => 'Iso'' s a -> a -> m ()
-- ('.|.=') :: ('MonadState' s m, 'Bits' a) => 'Lens'' s a -> a -> m ()
-- ('.|.=') :: ('MonadState' s m, 'Bits' a) => 'Traversal'' s a -> a -> m ()
-- @
(.|.=) :: (MonadState s m, Bits a) => ASetter' s a -> a -> m ()
l .|.= a = modify (l .|.~ a)
{-# INLINE (.|.=) #-}
-- | Bitwise '.|.' the target(s) of a 'Lens' (or 'Traversal'), returning the result
-- (or a monoidal summary of all of the results).
--
-- >>> _2 <.|.~ 6 $ ("hello",3)
-- (7,("hello",7))
--
-- @
-- ('<.|.~') :: 'Bits' a => 'Iso' s t a a -> a -> s -> (a, t)
-- ('<.|.~') :: 'Bits' a => 'Lens' s t a a -> a -> s -> (a, t)
-- ('<.|.~') :: ('Bits' a, 'Data.Monoid.Monoid' a) => 'Traversal' s t a a -> a -> s -> (a, t)
-- @
(<.|.~) :: Bits a => LensLike ((,) a) s t a a -> a -> s -> (a, t)
l <.|.~ n = l <%~ (.|. n)
{-# INLINE (<.|.~) #-}
-- | Bitwise '.&.' the target(s) of a 'Lens' or 'Traversal', returning the result
-- (or a monoidal summary of all of the results).
--
-- >>> _2 <.&.~ 7 $ ("hello",254)
-- (6,("hello",6))
--
-- @
-- ('<.&.~') :: 'Bits' a => 'Iso' s t a a -> a -> s -> (a, t)
-- ('<.&.~') :: 'Bits' a => 'Lens' s t a a -> a -> s -> (a, t)
-- ('<.&.~') :: ('Bits' a, 'Data.Monoid.Monoid' a) => 'Traversal' s t a a -> a -> s -> (a, t)
-- @
(<.&.~) :: Bits a => LensLike ((,) a) s t a a -> a -> s -> (a, t)
l <.&.~ n = l <%~ (.&. n)
{-# INLINE (<.&.~) #-}
-- | Modify the target(s) of a 'Lens'' (or 'Traversal'') by computing its bitwise '.&.' with another value,
-- returning the result (or a monoidal summary of all of the results traversed).
--
-- >>> runState (_1 <.&.= 15) (31,0)
-- (15,(15,0))
--
-- @
-- ('<.&.=') :: ('MonadState' s m, 'Bits' a) => 'Lens'' s a -> a -> m a
-- ('<.&.=') :: ('MonadState' s m, 'Bits' a, 'Data.Monoid.Monoid' a) => 'Traversal'' s a -> a -> m a
-- @
(<.&.=) :: (MonadState s m, Bits a) => LensLike' ((,)a) s a -> a -> m a
l <.&.= b = l <%= (.&. b)
{-# INLINE (<.&.=) #-}
-- | Modify the target(s) of a 'Lens'' (or 'Traversal'') by computing its bitwise '.|.' with another value,
-- returning the result (or a monoidal summary of all of the results traversed).
--
-- >>> runState (_1 <.|.= 7) (28,0)
-- (31,(31,0))
--
-- @
-- ('<.|.=') :: ('MonadState' s m, 'Bits' a) => 'Lens'' s a -> a -> m a
-- ('<.|.=') :: ('MonadState' s m, 'Bits' a, 'Data.Monoid.Monoid' a) => 'Traversal'' s a -> a -> m a
-- @
(<.|.=) :: (MonadState s m, Bits a) => LensLike' ((,)a) s a -> a -> m a
l <.|.= b = l <%= (.|. b)
{-# INLINE (<.|.=) #-}
-- | Bitwise '.&.' the target of a 'Lens', returning the /previous/ value.
(<<.&.~) :: Bits a => Optical' (->) q ((,)a) s a -> a -> q s (a, s)
l <<.&.~ b = l $ \a -> (a, a .&. b)
{-# INLINE (<<.&.~) #-}
-- | Bitwise '.|.' the target of a 'Lens', returning the /previous/ value.
(<<.|.~) :: Bits a => Optical' (->) q ((,)a) s a -> a -> q s (a, s)
l <<.|.~ b = l $ \a -> (a, a .|. b)
{-# INLINE (<<.|.~) #-}
-- | Modify the target of a 'Lens'' by computing its bitwise '.&.' with another
-- value, returning the /previous/ value.
(<<.&.=) :: (MonadState s m, Bits a) => LensLike' ((,) a) s a -> a -> m a
l <<.&.= b = l %%= \a -> (a, a .&. b)
{-# INLINE (<<.&.=) #-}
-- | Modify the target of a 'Lens'' by computing its bitwise '.|.' with another
-- value, returning the /previous/ value.
(<<.|.=) :: (MonadState s m, Bits a) => LensLike' ((,) a) s a -> a -> m a
l <<.|.= b = l %%= \a -> (a, a .|. b)
{-# INLINE (<<.|.=) #-}
-- | This 'Lens' can be used to access the value of the nth bit in a number.
--
-- @'bitAt' n@ is only a legal 'Lens' into @b@ if @0 '<=' n '<' 'bitSize' ('undefined' :: b)@.
--
-- >>> 16^.bitAt 4
-- True
--
-- >>> 15^.bitAt 4
-- False
--
-- >>> 15 & bitAt 4 .~ True
-- 31
--
-- >>> 16 & bitAt 4 .~ False
-- 0
bitAt :: Bits b => Int -> IndexedLens' Int b Bool
bitAt n f b = indexed f n (testBit b n) <&> \x -> if x then setBit b n else clearBit b n
{-# INLINE bitAt #-}
-- | Get the nth byte, counting from the low end.
--
-- @'byteAt' n@ is a legal 'Lens' into @b@ iff @0 '<=' n '<' 'div' ('bitSize' ('undefined' :: b)) 8@
--
-- >>> (0xff00 :: Word16)^.byteAt 0
-- 0
--
-- >>> (0xff00 :: Word16)^.byteAt 1
-- 255
--
-- >>> byteAt 1 .~ 0 $ 0xff00 :: Word16
-- 0
--
-- >>> byteAt 0 .~ 0xff $ 0 :: Word16
-- 255
byteAt :: (Integral b, Bits b) => Int -> IndexedLens' Int b Word8
byteAt i f b = back <$> indexed f i (forward b) where
back w8 = (fromIntegral w8 `shiftL` (i * 8))
.|. (complement (255 `shiftL` (i * 8)) .&. b)
forward = fromIntegral . (.&.) 0xff . flip shiftR (i * 8)
-- | Traverse over all bits in a numeric type.
--
-- The bit position is available as the index.
--
-- >>> toListOf bits (5 :: Word8)
-- [True,False,True,False,False,False,False,False]
--
-- If you supply this an 'Integer', the result will be an infinite 'Traversal', which
-- can be productively consumed, but not reassembled.
bits :: (Num b, Bits b) => IndexedTraversal' Int b Bool
bits f b = Prelude.foldr step 0 <$> traverse g bs where
g n = (,) n <$> indexed f n (testBit b n)
bs = Prelude.takeWhile hasBit [0..]
hasBit n = complementBit b n /= b -- test to make sure that complementing this bit actually changes the value
step (n,True) r = setBit r n
step _ r = r
{-# INLINE bits #-}
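-- A hedged usage sketch (editorial note, not part of the original module): the
-- index exposed by 'bits' is the bit position, so an indexed fold such as
-- 'ifoldrOf' from Control.Lens can collect the positions of the set bits;
-- 'Word8' keeps the traversal finite.
setBitPositions :: Word8 -> [Int]
setBitPositions = ifoldrOf bits (\i b r -> if b then i : r else r) []
-- e.g. setBitPositions 5 == [0,2] and setBitPositions 0 == []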
|
hvr/lens
|
src/Data/Bits/Lens.hs
|
Haskell
|
bsd-3-clause
| 7,823
|
import XMonad
import XMonad.Hooks.DynamicLog
import XMonad.Hooks.ManageDocks
import XMonad.Layout.NoBorders
import XMonad.Util.Run(spawnPipe)
import XMonad.Util.EZConfig(additionalKeys)
import System.IO
main = do
xmobarProcess <- spawnPipe "start-xmobar"
trayerProcess <- spawnPipe "start-trayer"
xmonad $ defaultConfig {
manageHook = manageDocks <+> manageHook defaultConfig
, layoutHook = avoidStruts $ smartBorders $ layoutHook defaultConfig
, handleEventHook = docksEventHook <+> handleEventHook defaultConfig
, logHook = dynamicLogWithPP xmobarPP {
ppOutput = hPutStrLn xmobarProcess
, ppTitle = xmobarColor "green" "" . shorten 50
}
, modMask = mod4Mask
} `additionalKeys` [
((mod4Mask, xK_b), sendMessage ToggleStruts)
]
|
justinlynn/monadix
|
src/Main.hs
|
Haskell
|
bsd-3-clause
| 949
|
module ClassInContext where
class FFF a where
fff :: a -> a
data S a = S a
instance FFF Int where
fff x = x
instance (Eq a, FFF a) => Eq (S a) where
(S x) == (S y) = (fff x) == (fff y)
cmpr :: S Int -> S Int -> Bool
cmpr = (==)
|
phischu/fragnix
|
tests/quick/ClassInContext/ClassInContext.hs
|
Haskell
|
bsd-3-clause
| 246
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE PatternGuards #-}
module AddHandler (addHandler) where
import Prelude hiding (readFile)
import System.IO (hFlush, stdout)
import Data.Char (isLower, toLower, isSpace)
import Data.List (isPrefixOf, isSuffixOf, stripPrefix)
import Data.Maybe (fromMaybe, listToMaybe)
import qualified Data.Text as T
import qualified Data.Text.IO as TIO
#if MIN_VERSION_Cabal(2, 2, 0)
import Distribution.PackageDescription.Parsec (readGenericPackageDescription)
#elif MIN_VERSION_Cabal(2, 0, 0)
import Distribution.PackageDescription.Parse (readGenericPackageDescription)
#else
import Distribution.PackageDescription.Parse (readPackageDescription)
#endif
import Distribution.PackageDescription.Configuration (flattenPackageDescription)
import Distribution.PackageDescription (allBuildInfo, hsSourceDirs)
import Distribution.Verbosity (normal)
import System.Directory (getDirectoryContents, doesFileExist)
import Control.Monad (unless)
data RouteError = EmptyRoute
| RouteCaseError
| RouteExists FilePath
deriving Eq
instance Show RouteError where
show EmptyRoute = "No name entered. Quitting ..."
show RouteCaseError = "Name must start with an upper case letter"
show (RouteExists file) = "File already exists: " ++ file
-- strict readFile
readFile :: FilePath -> IO String
readFile = fmap T.unpack . TIO.readFile
cmdLineArgsError :: String
cmdLineArgsError = "You have to specify a route name if you want to add handler with command line arguments."
addHandler :: Maybe String -> Maybe String -> [String] -> IO ()
addHandler (Just route) pat met = do
cabal <- getCabal
checked <- checkRoute route cabal
let routePair = case checked of
Left err@EmptyRoute -> (error . show) err
Left err@RouteCaseError -> (error . show) err
Left err@(RouteExists _) -> (error . show) err
Right p -> p
addHandlerFiles cabal routePair pattern methods
where
pattern = fromMaybe "" pat -- pattern defaults to ""
methods = unwords met -- methods default to none
addHandler Nothing (Just _) _ = error cmdLineArgsError
addHandler Nothing _ (_:_) = error cmdLineArgsError
addHandler _ _ _ = addHandlerInteractive
addHandlerInteractive :: IO ()
addHandlerInteractive = do
cabal <- getCabal
let routeInput = do
putStr "Name of route (without trailing R): "
hFlush stdout
name <- getLine
checked <- checkRoute name cabal
case checked of
Left err@EmptyRoute -> (error . show) err
Left err@RouteCaseError -> print err >> routeInput
Left err@(RouteExists _) -> do
print err
putStrLn "Try another name or leave blank to exit"
routeInput
Right p -> return p
routePair <- routeInput
putStr "Enter route pattern (ex: /entry/#EntryId): "
hFlush stdout
pattern <- getLine
putStr "Enter space-separated list of methods (ex: GET POST): "
hFlush stdout
methods <- getLine
addHandlerFiles cabal routePair pattern methods
getRoutesFilePath :: IO FilePath
getRoutesFilePath = do
let oldPath = "config/routes"
oldExists <- doesFileExist oldPath
pure $ if oldExists
then oldPath
else "config/routes.yesodroutes"
addHandlerFiles :: FilePath -> (String, FilePath) -> String -> String -> IO ()
addHandlerFiles cabal (name, handlerFile) pattern methods = do
src <- getSrcDir cabal
let applicationFile = concat [src, "/Application.hs"]
modify applicationFile $ fixApp name
modify cabal $ fixCabal name
routesPath <- getRoutesFilePath
modify routesPath $ fixRoutes name pattern methods
writeFile handlerFile $ mkHandler name pattern methods
specExists <- doesFileExist specFile
unless specExists $
writeFile specFile $ mkSpec name pattern methods
where
specFile = "test/Handler/" ++ name ++ "Spec.hs"
modify fp f = readFile fp >>= writeFile fp . f
getCabal :: IO FilePath
getCabal = do
allFiles <- getDirectoryContents "."
case filter (".cabal" `isSuffixOf`) allFiles of
[x] -> return x
[] -> error "No cabal file found"
_ -> error "Too many cabal files found"
checkRoute :: String -> FilePath -> IO (Either RouteError (String, FilePath))
checkRoute name cabal =
case name of
[] -> return $ Left EmptyRoute
c:_
| isLower c -> return $ Left RouteCaseError
| otherwise -> do
-- Check that the handler file doesn't already exist
src <- getSrcDir cabal
let handlerFile = concat [src, "/Handler/", name, ".hs"]
exists <- doesFileExist handlerFile
if exists
then (return . Left . RouteExists) handlerFile
else return $ Right (name, handlerFile)
fixApp :: String -> String -> String
fixApp name =
unlines . reverse . go . reverse . lines
where
l spaces = "import " ++ spaces ++ "Handler." ++ name
go [] = [l ""]
go (x:xs)
| Just y <- stripPrefix "import " x, "Handler." `isPrefixOf` dropWhile (== ' ') y = l (takeWhile (== ' ') y) : x : xs
| otherwise = x : go xs
fixCabal :: String -> String -> String
fixCabal name orig =
unlines $ (reverse $ go $ reverse libraryLines) ++ restLines
where
origLines = lines orig
(libraryLines, restLines) = break isExeTestBench origLines
isExeTestBench x = any
(\prefix -> prefix `isPrefixOf` x)
[ "executable"
, "test-suite"
, "benchmark"
]
l = " Handler." ++ name
go [] = [l]
go (x:xs)
| "Handler." `isPrefixOf` x' = (spaces ++ "Handler." ++ name) : x : xs
| otherwise = x : go xs
where
(spaces, x') = span isSpace x
fixRoutes :: String -> String -> String -> String -> String
fixRoutes name pattern methods fileContents =
fileContents ++ l
where
l = concat
[ startingCharacter
, pattern
, " "
, name
, "R "
, methods
, "\n"
]
startingCharacter = if "\n" `isSuffixOf` fileContents then "" else "\n"
mkSpec :: String -> String -> String -> String
mkSpec name _ methods = unlines
$ ("module Handler." ++ name ++ "Spec (spec) where")
: ""
: "import TestImport"
: ""
: "spec :: Spec"
: "spec = withApp $ do"
: concatMap go (words methods)
where
go method =
[ ""
, " describe \"" ++ func ++ "\" $ do"
, " error \"Spec not implemented: " ++ func ++ "\""
, ""]
where
func = concat [map toLower method, name, "R"]
mkHandler :: String -> String -> String -> String
mkHandler name pattern methods = unlines
$ ("module Handler." ++ name ++ " where")
: ""
: "import Import"
: concatMap go (words methods)
where
go method =
[ ""
, concat $ func : " :: " : map toArrow types ++ ["Handler Html"]
, concat
[ func
, " "
, concatMap toArgument types
, "= error \"Not yet implemented: "
, func
, "\""
]
]
where
func = concat [map toLower method, name, "R"]
types = getTypes pattern
toArrow t = concat [t, " -> "]
toArgument t = concat [uncapitalize t, " "]
getTypes "" = []
getTypes ('/':rest) = getTypes rest
getTypes (c:rest) | c `elem` "#*" =
typ : getTypes rest'
where
(typ, rest') = break (== '/') rest
getTypes rest = getTypes $ dropWhile (/= '/') rest
uncapitalize :: String -> String
uncapitalize (x:xs) = toLower x : xs
uncapitalize "" = ""
getSrcDir :: FilePath -> IO FilePath
getSrcDir cabal = do
#if MIN_VERSION_Cabal(2, 0, 0)
pd <- flattenPackageDescription <$> readGenericPackageDescription normal cabal
#else
pd <- flattenPackageDescription <$> readPackageDescription normal cabal
#endif
let buildInfo = allBuildInfo pd
srcDirs = concatMap hsSourceDirs buildInfo
return $ fromMaybe "." $ listToMaybe srcDirs
|
geraldus/yesod
|
yesod-bin/AddHandler.hs
|
Haskell
|
mit
| 8,191
|
module RecursiveRef where
{-# ANN module "HLint: ignore Eta reduce" #-}
-- Recursive function call without type signature targets the monomorphic
-- binding. This verifies that we handle the case.
-- - @recNoSig defines/binding FunRNS
recNoSig x =
-- - @recNoSig ref FunRNS
recNoSig x
-- - @localRecNoSig ref FunLRNS
dummy = localRecNoSig
where
-- - @localRecNoSig defines/binding FunLRNS
localRecNoSig x =
-- - @localRecNoSig ref FunLRNS
localRecNoSig x
-- Recursive call to function with type signature targets the polymorphic
-- binding.
recWithSig :: Int -> Int
-- - @recWithSig defines/binding FunRWS
recWithSig x =
-- - @recWithSig ref FunRWS
recWithSig x
-- - @mutualNoSigA defines/binding FunMA
-- - @mutualNoSigB ref FunMB
mutualNoSigA = mutualNoSigB
-- - @mutualNoSigB defines/binding FunMB
-- - @mutualNoSigA ref FunMA
mutualNoSigB = mutualNoSigA
-- - @etaNoSig defines/binding FunENS
etaNoSig =
-- - @etaNoSig ref FunENS
etaNoSig
|
google/haskell-indexer
|
kythe-verification/testdata/basic/RecursiveRef.hs
|
Haskell
|
apache-2.0
| 997
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ViewPatterns #-}
-- |
-- Module : Documentation.Haddock.Parser
-- Copyright : (c) Mateusz Kowalczyk 2013-2014,
-- Simon Hengel 2013
-- License : BSD-like
--
-- Maintainer : haddock@projects.haskell.org
-- Stability : experimental
-- Portability : portable
--
-- Parser used for Haddock comments. For external users of this
-- library, the most commonly used combination of functions is going
-- to be
--
-- @'toRegular' . '_doc' . 'parseParas'@
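--
-- A hedged usage sketch (editorial note; 'MetaDoc' and its '_doc' field come
-- from "Documentation.Haddock.Types"):
--
-- > parseToRegular :: String -> DocH mod String
-- > parseToRegular = toRegular . _doc . parseParas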
module Documentation.Haddock.Parser ( parseString, parseParas
, overIdentifier, toRegular, Identifier
) where
import Control.Applicative
import Control.Arrow (first)
import Control.Monad
import qualified Data.ByteString.Char8 as BS
import Data.Char (chr, isAsciiUpper)
import Data.List (stripPrefix, intercalate, unfoldr, elemIndex)
import Data.Maybe (fromMaybe, mapMaybe)
import Data.Monoid
import qualified Data.Set as Set
import Documentation.Haddock.Doc
import Documentation.Haddock.Parser.Monad hiding (take, endOfLine)
import Documentation.Haddock.Parser.Util
import Documentation.Haddock.Types
import Documentation.Haddock.Utf8
import Prelude hiding (takeWhile)
import qualified Prelude as P
-- $setup
-- >>> :set -XOverloadedStrings
-- | Identifier string surrounded with opening and closing quotes/backticks.
type Identifier = (Char, String, Char)
-- | Drops the quotes/backticks around all identifiers, as if they
-- were valid but still 'String's.
toRegular :: DocH mod Identifier -> DocH mod String
toRegular = fmap (\(_, x, _) -> x)
-- | Maps over 'DocIdentifier's over 'String' with potentially failing
-- conversion using user-supplied function. If the conversion fails,
-- the identifier is deemed to not be valid and is treated as a
-- regular string.
overIdentifier :: (String -> Maybe a)
-> DocH mod Identifier
-> DocH mod a
overIdentifier f d = g d
where
g (DocIdentifier (o, x, e)) = case f x of
Nothing -> DocString $ o : x ++ [e]
Just x' -> DocIdentifier x'
g DocEmpty = DocEmpty
g (DocAppend x x') = DocAppend (g x) (g x')
g (DocString x) = DocString x
g (DocParagraph x) = DocParagraph $ g x
g (DocIdentifierUnchecked x) = DocIdentifierUnchecked x
g (DocModule x) = DocModule x
g (DocWarning x) = DocWarning $ g x
g (DocEmphasis x) = DocEmphasis $ g x
g (DocMonospaced x) = DocMonospaced $ g x
g (DocBold x) = DocBold $ g x
g (DocUnorderedList x) = DocUnorderedList $ fmap g x
g (DocOrderedList x) = DocOrderedList $ fmap g x
g (DocDefList x) = DocDefList $ fmap (\(y, z) -> (g y, g z)) x
g (DocCodeBlock x) = DocCodeBlock $ g x
g (DocHyperlink x) = DocHyperlink x
g (DocPic x) = DocPic x
g (DocMathInline x) = DocMathInline x
g (DocMathDisplay x) = DocMathDisplay x
g (DocAName x) = DocAName x
g (DocProperty x) = DocProperty x
g (DocExamples x) = DocExamples x
g (DocHeader (Header l x)) = DocHeader . Header l $ g x
g (DocTable (Table h b)) = DocTable (Table (map (fmap g) h) (map (fmap g) b))
parse :: Parser a -> BS.ByteString -> (ParserState, a)
parse p = either err id . parseOnly (p <* endOfInput)
where
err = error . ("Haddock.Parser.parse: " ++)
-- | Main entry point to the parser. Appends the newline character
-- to the input string.
parseParas :: String -- ^ String to parse
-> MetaDoc mod Identifier
parseParas input = case parseParasState input of
(state, a) -> MetaDoc { _meta = Meta { _version = parserStateSince state }
, _doc = a
}
parseParasState :: String -> (ParserState, DocH mod Identifier)
parseParasState =
parse (p <* skipSpace) . encodeUtf8 . (++ "\n") . filter (/= '\r')
where
p :: Parser (DocH mod Identifier)
p = docConcat <$> paragraph `sepBy` many (skipHorizontalSpace *> "\n")
parseParagraphs :: String -> Parser (DocH mod Identifier)
parseParagraphs input = case parseParasState input of
(state, a) -> setParserState state >> return a
-- | Parse a text paragraph. Actually just a wrapper over 'parseStringBS' which
-- drops leading whitespace and encodes the string to UTF8 first.
parseString :: String -> DocH mod Identifier
parseString = parseStringBS . encodeUtf8 . dropWhile isSpace . filter (/= '\r')
parseStringBS :: BS.ByteString -> DocH mod Identifier
parseStringBS = snd . parse p
where
p :: Parser (DocH mod Identifier)
p = docConcat <$> many (monospace <|> anchor <|> identifier <|> moduleName
<|> picture <|> mathDisplay <|> mathInline
<|> markdownImage
<|> hyperlink <|> bold
<|> emphasis <|> encodedChar <|> string'
<|> skipSpecialChar)
-- | Parses and processes
-- <https://en.wikipedia.org/wiki/Numeric_character_reference Numeric character references>
--
-- >>> parseString "A"
-- DocString "A"
encodedChar :: Parser (DocH mod a)
encodedChar = "&#" *> c <* ";"
where
c = DocString . return . chr <$> num
num = hex <|> decimal
hex = ("x" <|> "X") *> hexadecimal
-- | List of characters that we use to delimit any special markup.
-- Once we have checked for any of these and tried to parse the
-- relevant markup, we can assume they are used as regular text.
specialChar :: [Char]
specialChar = "_/<@\"&'`# "
-- | Plain, regular parser for text. Called as one of the last parsers
-- to ensure that we have already given a chance to more meaningful parsers
-- before capturing their characters.
string' :: Parser (DocH mod a)
string' = DocString . unescape . decodeUtf8 <$> takeWhile1_ (notInClass specialChar)
where
unescape "" = ""
unescape ('\\':x:xs) = x : unescape xs
unescape (x:xs) = x : unescape xs
-- | Skips a single special character and treats it as a plain string.
-- This is done to skip over any special characters belonging to other
-- elements but which were not deemed meaningful at their positions.
skipSpecialChar :: Parser (DocH mod a)
skipSpecialChar = DocString . return <$> satisfy (inClass specialChar)
-- | Emphasis parser.
--
-- >>> parseString "/Hello world/"
-- DocEmphasis (DocString "Hello world")
emphasis :: Parser (DocH mod Identifier)
emphasis = DocEmphasis . parseStringBS <$>
mfilter ('\n' `BS.notElem`) ("/" *> takeWhile1_ (/= '/') <* "/")
-- | Bold parser.
--
-- >>> parseString "__Hello world__"
-- DocBold (DocString "Hello world")
bold :: Parser (DocH mod Identifier)
bold = DocBold . parseStringBS <$> disallowNewline ("__" *> takeUntil "__")
disallowNewline :: Parser BS.ByteString -> Parser BS.ByteString
disallowNewline = mfilter ('\n' `BS.notElem`)
-- | Like `takeWhile`, but unconditionally take escaped characters.
takeWhile_ :: (Char -> Bool) -> Parser BS.ByteString
takeWhile_ p = scan False p_
where
p_ escaped c
| escaped = Just False
| not $ p c = Nothing
| otherwise = Just (c == '\\')
-- | Like `takeWhile1`, but unconditionally take escaped characters.
takeWhile1_ :: (Char -> Bool) -> Parser BS.ByteString
takeWhile1_ = mfilter (not . BS.null) . takeWhile_
-- | Text anchors to allow for jumping around the generated documentation.
--
-- >>> parseString "#Hello world#"
-- DocAName "Hello world"
anchor :: Parser (DocH mod a)
anchor = DocAName . decodeUtf8 <$>
disallowNewline ("#" *> takeWhile1_ (/= '#') <* "#")
-- | Monospaced strings.
--
-- >>> parseString "@cruel@"
-- DocMonospaced (DocString "cruel")
monospace :: Parser (DocH mod Identifier)
monospace = DocMonospaced . parseStringBS
<$> ("@" *> takeWhile1_ (/= '@') <* "@")
-- | Module names: we try our reasonable best to only allow valid
-- Haskell module names, with the caveat that we cannot match technically
-- valid Unicode symbols.
moduleName :: Parser (DocH mod a)
moduleName = DocModule <$> (char '"' *> modid <* char '"')
where
modid = intercalate "." <$> conid `sepBy1` "."
conid = (:)
<$> satisfy isAsciiUpper
-- NOTE: According to Haskell 2010 we should actually only
-- accept {small | large | digit | ' } here. But as we can't
-- match on unicode characters, this is currently not possible.
-- Note that we allow ‘#’ to support anchors.
<*> (decodeUtf8 <$> takeWhile (notInClass " .&[{}(=*)+]!|@/;,^?\"\n"))
-- | Picture parser, surrounded by \<\< and \>\>. It's possible to specify
-- a title for the picture.
--
-- >>> parseString "<<hello.png>>"
-- DocPic (Picture {pictureUri = "hello.png", pictureTitle = Nothing})
-- >>> parseString "<<hello.png world>>"
-- DocPic (Picture {pictureUri = "hello.png", pictureTitle = Just "world"})
picture :: Parser (DocH mod a)
picture = DocPic . makeLabeled Picture . decodeUtf8
<$> disallowNewline ("<<" *> takeUntil ">>")
-- | Inline math parser, surrounded by \\( and \\).
--
-- >>> parseString "\\(\\int_{-\\infty}^{\\infty} e^{-x^2/2} = \\sqrt{2\\pi}\\)"
-- DocMathInline "\\int_{-\\infty}^{\\infty} e^{-x^2/2} = \\sqrt{2\\pi}"
mathInline :: Parser (DocH mod a)
mathInline = DocMathInline . decodeUtf8
<$> disallowNewline ("\\(" *> takeUntil "\\)")
-- | Display math parser, surrounded by \\[ and \\].
--
-- >>> parseString "\\[\\int_{-\\infty}^{\\infty} e^{-x^2/2} = \\sqrt{2\\pi}\\]"
-- DocMathDisplay "\\int_{-\\infty}^{\\infty} e^{-x^2/2} = \\sqrt{2\\pi}"
mathDisplay :: Parser (DocH mod a)
mathDisplay = DocMathDisplay . decodeUtf8
<$> ("\\[" *> takeUntil "\\]")
markdownImage :: Parser (DocH mod a)
markdownImage = fromHyperlink <$> ("!" *> linkParser)
where
fromHyperlink (Hyperlink url label) = DocPic (Picture url label)
-- | Paragraph parser, called by 'parseParas'.
paragraph :: Parser (DocH mod Identifier)
paragraph = examples <|> table <|> do
indent <- takeIndent
choice
[ since
, unorderedList indent
, orderedList indent
, birdtracks
, codeblock
, property
, header
, textParagraphThatStartsWithMarkdownLink
, definitionList indent
, docParagraph <$> textParagraph
]
-- | Provides support for grid tables.
--
-- Tables are composed of an optional header and a body. The header consists of
-- a single row. The body is a non-empty list of rows.
--
-- Example table with header:
--
-- > +----------+----------+
-- > | /32bit/ | 64bit |
-- > +==========+==========+
-- > | 0x0000 | @0x0000@ |
-- > +----------+----------+
--
-- The algorithm loosely follows ideas in
-- http://docutils.sourceforge.net/docutils/parsers/rst/tableparser.py
--
table :: Parser (DocH mod Identifier)
table = do
-- first we parse the first row, which determines the width of the table
firstRow <- parseFirstRow
let len = BS.length firstRow
-- then we parse all consecutive rows starting and ending with + or |,
-- of the width `len`.
restRows <- many (parseRestRows len)
-- Now that we have gathered the table block, the next step is to split the
-- block into cells.
DocTable <$> tableStepTwo len (firstRow : restRows)
where
parseFirstRow :: Parser BS.ByteString
parseFirstRow = do
skipHorizontalSpace
-- upper-left corner is +
c <- char '+'
cs <- many1 (char '-' <|> char '+')
-- upper right corner is + too
guard (last cs == '+')
-- trailing space
skipHorizontalSpace
_ <- char '\n'
return (BS.cons c $ BS.pack cs)
parseRestRows :: Int -> Parser BS.ByteString
parseRestRows l = do
skipHorizontalSpace
c <- char '|' <|> char '+'
bs <- scan (l - 2) predicate
c2 <- char '|' <|> char '+'
-- trailing space
skipHorizontalSpace
_ <- char '\n'
return (BS.cons c (BS.snoc bs c2))
where
predicate n c
| n <= 0 = Nothing
| c == '\n' = Nothing
| otherwise = Just (n - 1)
-- The second step searches for a row of '+' and '=' characters, records its
-- index and changes '=' to '-'.
tableStepTwo
:: Int -- ^ width
-> [BS.ByteString] -- ^ rows
-> Parser (Table (DocH mod Identifier))
tableStepTwo width = go 0 [] where
go _ left [] = tableStepThree width (reverse left) Nothing
go n left (r : rs)
| BS.all (`elem` ['+', '=']) r =
tableStepThree width (reverse left ++ r' : rs) (Just n)
| otherwise =
go (n + 1) (r : left) rs
where
r' = BS.map (\c -> if c == '=' then '-' else c) r
-- The third step recognises cells in the table area, returning a list of cells (TC values).
tableStepThree
:: Int -- ^ width
-> [BS.ByteString] -- ^ rows
-> Maybe Int -- ^ index of header separator
-> Parser (Table (DocH mod Identifier))
tableStepThree width rs hdrIndex = do
cells <- loop (Set.singleton (0, 0))
tableStepFour rs hdrIndex cells
where
height = length rs
loop :: Set.Set (Int, Int) -> Parser [TC]
loop queue = case Set.minView queue of
Nothing -> return []
Just ((y, x), queue')
| y + 1 >= height || x + 1 >= width -> loop queue'
| otherwise -> case scanRight x y of
Nothing -> loop queue'
Just (x2, y2) -> do
let tc = TC y x y2 x2
fmap (tc :) $ loop $ queue' `Set.union` Set.fromList
[(y, x2), (y2, x), (y2, x2)]
-- scan right looking for '+', then try to scan down
--
-- do we need to record the '+' characters seen on the way left and down?
scanRight :: Int -> Int -> Maybe (Int, Int)
scanRight x y = go (x + 1) where
bs = rs !! y
go x' | x' >= width = fail "overflow right "
| BS.index bs x' == '+' = scanDown x y x' <|> go (x' + 1)
| BS.index bs x' == '-' = go (x' + 1)
| otherwise = fail $ "not a border (right) " ++ show (x,y,x')
-- scan down looking for +
scanDown :: Int -> Int -> Int -> Maybe (Int, Int)
scanDown x y x2 = go (y + 1) where
go y' | y' >= height = fail "overflow down"
| BS.index (rs !! y') x2 == '+' = scanLeft x y x2 y' <|> go (y' + 1)
| BS.index (rs !! y') x2 == '|' = go (y' + 1)
| otherwise = fail $ "not a border (down) " ++ show (x,y,x2,y')
-- check that at y2 x..x2 characters are '+' or '-'
scanLeft :: Int -> Int -> Int -> Int -> Maybe (Int, Int)
scanLeft x y x2 y2
| all (\x' -> BS.index bs x' `elem` ['+', '-']) [x..x2] = scanUp x y x2 y2
| otherwise = fail $ "not a border (left) " ++ show (x,y,x2,y2)
where
bs = rs !! y2
-- check that in column x, rows y..y2 are '+' or '|'
scanUp :: Int -> Int -> Int -> Int -> Maybe (Int, Int)
scanUp x y x2 y2
| all (\y' -> BS.index (rs !! y') x `elem` ['+', '|']) [y..y2] = return (x2, y2)
| otherwise = fail $ "not a border (up) " ++ show (x,y,x2,y2)
-- | table cell: top left bottom right
data TC = TC !Int !Int !Int !Int
deriving Show
tcXS :: TC -> [Int]
tcXS (TC _ x _ x2) = [x, x2]
tcYS :: TC -> [Int]
tcYS (TC y _ y2 _) = [y, y2]
-- | The fourth step. Given the locations of the cells, forms the 'Table' structure.
tableStepFour :: [BS.ByteString] -> Maybe Int -> [TC] -> Parser (Table (DocH mod Identifier))
tableStepFour rs hdrIndex cells = case hdrIndex of
Nothing -> return $ Table [] rowsDoc
Just i -> case elemIndex i yTabStops of
Nothing -> return $ Table [] rowsDoc
Just i' -> return $ uncurry Table $ splitAt i' rowsDoc
where
xTabStops = sortNub $ concatMap tcXS cells
yTabStops = sortNub $ concatMap tcYS cells
sortNub :: Ord a => [a] -> [a]
sortNub = Set.toList . Set.fromList
init' :: [a] -> [a]
init' [] = []
init' [_] = []
init' (x : xs) = x : init' xs
rowsDoc = (fmap . fmap) parseStringBS rows
rows = map makeRow (init' yTabStops)
where
makeRow y = TableRow $ mapMaybe (makeCell y) cells
makeCell y (TC y' x y2 x2)
| y /= y' = Nothing
| otherwise = Just $ TableCell xts yts (extract (x + 1) (y + 1) (x2 - 1) (y2 - 1))
where
xts = length $ P.takeWhile (< x2) $ dropWhile (< x) xTabStops
yts = length $ P.takeWhile (< y2) $ dropWhile (< y) yTabStops
-- extract cell contents given boundaries
extract :: Int -> Int -> Int -> Int -> BS.ByteString
extract x y x2 y2 = BS.intercalate "\n"
[ BS.take (x2 - x + 1) $ BS.drop x $ rs !! y'
| y' <- [y .. y2]
]
-- | Parse \@since annotations.
since :: Parser (DocH mod a)
since = ("@since " *> version <* skipHorizontalSpace <* endOfLine) >>= setSince >> return DocEmpty
where
version = decimal `sepBy1'` "."
-- | Headers inside the comment denoted with @=@ signs, up to 6 levels
-- deep.
--
-- >>> snd <$> parseOnly header "= Hello"
-- Right (DocHeader (Header {headerLevel = 1, headerTitle = DocString "Hello"}))
-- >>> snd <$> parseOnly header "== World"
-- Right (DocHeader (Header {headerLevel = 2, headerTitle = DocString "World"}))
header :: Parser (DocH mod Identifier)
header = do
let psers = map (string . encodeUtf8 . concat . flip replicate "=") [6, 5 .. 1]
pser = foldl1 (<|>) psers
delim <- decodeUtf8 <$> pser
line <- skipHorizontalSpace *> nonEmptyLine >>= return . parseString
rest <- paragraph <|> return DocEmpty
return $ DocHeader (Header (length delim) line) `docAppend` rest
textParagraph :: Parser (DocH mod Identifier)
textParagraph = parseString . intercalate "\n" <$> many1 nonEmptyLine
textParagraphThatStartsWithMarkdownLink :: Parser (DocH mod Identifier)
textParagraphThatStartsWithMarkdownLink = docParagraph <$> (docAppend <$> markdownLink <*> optionalTextParagraph)
where
optionalTextParagraph :: Parser (DocH mod Identifier)
optionalTextParagraph = (docAppend <$> whitespace <*> textParagraph) <|> pure DocEmpty
whitespace :: Parser (DocH mod a)
whitespace = DocString <$> (f <$> takeHorizontalSpace <*> optional "\n")
where
f :: BS.ByteString -> Maybe BS.ByteString -> String
f xs (fromMaybe "" -> x)
| BS.null (xs <> x) = ""
| otherwise = " "
-- | Parses unordered (bullet) lists.
unorderedList :: BS.ByteString -> Parser (DocH mod Identifier)
unorderedList indent = DocUnorderedList <$> p
where
p = ("*" <|> "-") *> innerList indent p
-- | Parses ordered lists (numbered with @1.@ or @(1)@ style markers).
orderedList :: BS.ByteString -> Parser (DocH mod Identifier)
orderedList indent = DocOrderedList <$> p
where
p = (paren <|> dot) *> innerList indent p
dot = (decimal :: Parser Int) <* "."
paren = "(" *> decimal <* ")"
-- | Generic function collecting any further lines belonging to the
-- list entry and recursively collecting any further lists in the
-- same paragraph. Usually used as
--
-- > someListFunction = listBeginning *> innerList someListFunction
innerList :: BS.ByteString -> Parser [DocH mod Identifier]
-> Parser [DocH mod Identifier]
innerList indent item = do
c <- takeLine
(cs, items) <- more indent item
let contents = docParagraph . parseString . dropNLs . unlines $ c : cs
return $ case items of
Left p -> [contents `docAppend` p]
Right i -> contents : i
-- | Parses definition lists.
definitionList :: BS.ByteString -> Parser (DocH mod Identifier)
definitionList indent = DocDefList <$> p
where
p = do
label <- "[" *> (parseStringBS <$> takeWhile1_ (notInClass "]\n")) <* ("]" <* optional ":")
c <- takeLine
(cs, items) <- more indent p
let contents = parseString . dropNLs . unlines $ c : cs
return $ case items of
Left x -> [(label, contents `docAppend` x)]
Right i -> (label, contents) : i
-- | Drops all trailing newlines.
dropNLs :: String -> String
dropNLs = reverse . dropWhile (== '\n') . reverse
-- | Main worker for 'innerList' and 'definitionList'.
-- We need the 'Either' here to be able to tell in the respective functions
-- whether we're dealing with the next list or a nested paragraph.
more :: Monoid a => BS.ByteString -> Parser a
-> Parser ([String], Either (DocH mod Identifier) a)
more indent item = innerParagraphs indent
<|> moreListItems indent item
<|> moreContent indent item
<|> pure ([], Right mempty)
-- | Used by 'innerList' and 'definitionList' to parse any nested paragraphs.
innerParagraphs :: BS.ByteString
-> Parser ([String], Either (DocH mod Identifier) a)
innerParagraphs indent = (,) [] . Left <$> ("\n" *> indentedParagraphs indent)
-- | Attempts to fetch the next list if possible. Used by 'innerList' and
-- 'definitionList' to recursively grab lists that aren't separated by a whole
-- paragraph.
moreListItems :: BS.ByteString -> Parser a
-> Parser ([String], Either (DocH mod Identifier) a)
moreListItems indent item = (,) [] . Right <$> indentedItem
where
indentedItem = string indent *> skipSpace *> item
-- | Helper for 'innerList' and 'definitionList' which simply takes
-- a line of text and attempts to parse more list content with 'more'.
moreContent :: Monoid a => BS.ByteString -> Parser a
-> Parser ([String], Either (DocH mod Identifier) a)
moreContent indent item = first . (:) <$> nonEmptyLine <*> more indent item
-- | Parses an indented paragraph.
-- The indentation is 4 spaces.
indentedParagraphs :: BS.ByteString -> Parser (DocH mod Identifier)
indentedParagraphs indent =
(concat <$> dropFrontOfPara indent') >>= parseParagraphs
where
indent' = string $ BS.append indent " "
-- | Grab as many fully indented paragraphs as we can.
dropFrontOfPara :: Parser BS.ByteString -> Parser [String]
dropFrontOfPara sp = do
currentParagraph <- some (sp *> takeNonEmptyLine)
followingParagraphs <-
skipHorizontalSpace *> nextPar -- we have more paragraphs to take
<|> skipHorizontalSpace *> nlList -- end of the ride, remember the newline
<|> endOfInput *> return [] -- nothing more to take at all
return (currentParagraph ++ followingParagraphs)
where
nextPar = (++) <$> nlList <*> dropFrontOfPara sp
nlList = "\n" *> return ["\n"]
nonSpace :: BS.ByteString -> Parser BS.ByteString
nonSpace xs
| not $ any (not . isSpace) $ decodeUtf8 xs = fail "empty line"
| otherwise = return xs
-- | Takes a non-empty, not fully whitespace line.
--
-- Doesn't discard the trailing newline.
takeNonEmptyLine :: Parser String
takeNonEmptyLine = do
(++ "\n") . decodeUtf8 <$> (takeWhile1 (/= '\n') >>= nonSpace) <* "\n"
-- | Takes indentation of first non-empty line.
--
-- More precisely: skips all whitespace-only lines and returns indentation
-- (horizontal space, might be empty) of that non-empty line.
takeIndent :: Parser BS.ByteString
takeIndent = do
indent <- takeHorizontalSpace
"\n" *> takeIndent <|> return indent
-- | Blocks of text of the form:
--
-- >> foo
-- >> bar
-- >> baz
--
birdtracks :: Parser (DocH mod a)
birdtracks = DocCodeBlock . DocString . intercalate "\n" . stripSpace <$> many1 line
where
line = skipHorizontalSpace *> ">" *> takeLine
stripSpace :: [String] -> [String]
stripSpace = fromMaybe <*> mapM strip'
where
strip' (' ':xs') = Just xs'
strip' "" = Just ""
strip' _ = Nothing
-- | Parses examples. Examples are a paragraph-level entity (separated by an empty line).
-- Consecutive examples are accepted.
examples :: Parser (DocH mod a)
examples = DocExamples <$> (many (skipHorizontalSpace *> "\n") *> go)
where
go :: Parser [Example]
go = do
prefix <- decodeUtf8 <$> takeHorizontalSpace <* ">>>"
expr <- takeLine
(rs, es) <- resultAndMoreExamples
return (makeExample prefix expr rs : es)
where
resultAndMoreExamples :: Parser ([String], [Example])
resultAndMoreExamples = moreExamples <|> result <|> pure ([], [])
where
moreExamples :: Parser ([String], [Example])
moreExamples = (,) [] <$> go
result :: Parser ([String], [Example])
result = first . (:) <$> nonEmptyLine <*> resultAndMoreExamples
makeExample :: String -> String -> [String] -> Example
makeExample prefix expression res =
Example (strip expression) result
where
result = map (substituteBlankLine . tryStripPrefix) res
tryStripPrefix xs = fromMaybe xs (stripPrefix prefix xs)
substituteBlankLine "<BLANKLINE>" = ""
substituteBlankLine xs = xs
nonEmptyLine :: Parser String
nonEmptyLine = mfilter (any (not . isSpace)) takeLine
takeLine :: Parser String
takeLine = decodeUtf8 <$> takeWhile (/= '\n') <* endOfLine
endOfLine :: Parser ()
endOfLine = void "\n" <|> endOfInput
-- | Property parser.
--
-- >>> snd <$> parseOnly property "prop> hello world"
-- Right (DocProperty "hello world")
property :: Parser (DocH mod a)
property = DocProperty . strip . decodeUtf8 <$> ("prop>" *> takeWhile1 (/= '\n'))
-- |
-- Paragraph level codeblock. Anything between the two delimiting \@ is parsed
-- for markup.
codeblock :: Parser (DocH mod Identifier)
codeblock =
DocCodeBlock . parseStringBS . dropSpaces
<$> ("@" *> skipHorizontalSpace *> "\n" *> block' <* "@")
where
dropSpaces xs =
let rs = decodeUtf8 xs
in case splitByNl rs of
[] -> xs
ys -> case last ys of
' ':_ -> case mapM dropSpace ys of
Nothing -> xs
Just zs -> encodeUtf8 $ intercalate "\n" zs
_ -> xs
-- This is necessary because ‘lines’ swallows up a trailing newline, and we
-- lose the information about whether the last line belongs to the @ block or
-- to the following text, which we need in order to decide whether we actually
-- want to be dropping anything at all.
splitByNl = unfoldr (\x -> case x of
'\n':s -> Just (span (/= '\n') s)
_ -> Nothing)
. ('\n' :)
dropSpace "" = Just ""
dropSpace (' ':xs) = Just xs
dropSpace _ = Nothing
block' = scan False p
where
p isNewline c
| isNewline && c == '@' = Nothing
| isNewline && isSpace c = Just isNewline
| otherwise = Just $ c == '\n'
hyperlink :: Parser (DocH mod a)
hyperlink = DocHyperlink . makeLabeled Hyperlink . decodeUtf8
<$> disallowNewline ("<" *> takeUntil ">")
<|> autoUrl
<|> markdownLink
markdownLink :: Parser (DocH mod a)
markdownLink = DocHyperlink <$> linkParser
linkParser :: Parser Hyperlink
linkParser = flip Hyperlink <$> label <*> (whitespace *> url)
where
label :: Parser (Maybe String)
label = Just . strip . decode <$> ("[" *> takeUntil "]")
whitespace :: Parser ()
whitespace = skipHorizontalSpace <* optional ("\n" *> skipHorizontalSpace)
url :: Parser String
url = rejectWhitespace (decode <$> ("(" *> takeUntil ")"))
rejectWhitespace :: MonadPlus m => m String -> m String
rejectWhitespace = mfilter (all (not . isSpace))
decode :: BS.ByteString -> String
decode = removeEscapes . decodeUtf8
-- | Looks for URL-like things to automatically hyperlink even if they
-- weren't marked as links.
autoUrl :: Parser (DocH mod a)
autoUrl = mkLink <$> url
where
url = mappend <$> ("http://" <|> "https://" <|> "ftp://") <*> takeWhile1 (not . isSpace)
mkLink :: BS.ByteString -> DocH mod a
mkLink s = case unsnoc s of
Just (xs, x) | inClass ",.!?" x -> DocHyperlink (Hyperlink (decodeUtf8 xs) Nothing) `docAppend` DocString [x]
_ -> DocHyperlink (Hyperlink (decodeUtf8 s) Nothing)
-- | Parses strings between identifier delimiters. Consumes all input that it
-- deems to be valid in an identifier. Note that it simply blindly consumes
-- characters and does no actual validation itself.
parseValid :: Parser String
parseValid = p some
where
idChar =
satisfy (\c -> isAlpha_ascii c
|| isDigit c
-- N.B. '-' is placed first otherwise attoparsec thinks
-- it belongs to a character class
|| inClass "-_.!#$%&*+/<=>?@\\|~:^" c)
p p' = do
vs' <- p' $ utf8String "⋆" <|> return <$> idChar
let vs = concat vs'
c <- peekChar'
case c of
'`' -> return vs
'\'' -> (\x -> vs ++ "'" ++ x) <$> ("'" *> p many') <|> return vs
_ -> fail "outofvalid"
-- | Parses UTF8 strings from ByteString streams.
utf8String :: String -> Parser String
utf8String x = decodeUtf8 <$> string (encodeUtf8 x)
-- | Parses identifiers with the help of 'parseValid'. Asks GHC for
-- 'String' from the string it deems valid.
identifier :: Parser (DocH mod Identifier)
identifier = do
o <- idDelim
vid <- parseValid
e <- idDelim
return $ DocIdentifier (o, vid, e)
where
idDelim = satisfy (\c -> c == '\'' || c == '`')
|
Fuuzetsu/haddock
|
haddock-library/src/Documentation/Haddock/Parser.hs
|
Haskell
|
bsd-2-clause
| 29,240
|
module Settings.Packages.Base (basePackageArgs) where
import Expression
import Settings
basePackageArgs :: Args
basePackageArgs = package base ? do
integerLibraryName <- pkgName <$> getIntegerPackage
mconcat [ builder GhcCabal ? arg ("--flags=" ++ integerLibraryName)
-- This fixes the 'unknown symbol stat' issue.
-- See: https://github.com/snowleopard/hadrian/issues/259.
, builder (Ghc CompileCWithGhc) ? arg "-optc-O2" ]
|
bgamari/shaking-up-ghc
|
src/Settings/Packages/Base.hs
|
Haskell
|
bsd-3-clause
| 471
|
module Graphics.Gnuplot.Frame (
Frame.T,
cons, simple, empty,
) where
import qualified Graphics.Gnuplot.Frame.OptionSet as OptionSet
import qualified Graphics.Gnuplot.Private.Frame as Frame
import qualified Graphics.Gnuplot.Private.Plot as Plot
import qualified Graphics.Gnuplot.Private.GraphEmpty as Empty
import qualified Graphics.Gnuplot.Private.Graph as Graph
-- | Combine a set of options with a plot to form a frame.
cons :: OptionSet.T graph -> Plot.T graph -> Frame.T graph
cons = Frame.Cons
-- | Frame that uses the default options.
simple :: Graph.C graph => Plot.T graph -> Frame.T graph
simple = cons OptionSet.deflt
-- | Frame containing no graphs.
empty :: Frame.T Empty.T
empty = simple $ Plot.pure []
|
wavewave/gnuplot
|
src/Graphics/Gnuplot/Frame.hs
|
Haskell
|
bsd-3-clause
| 597
|
{-# LANGUAGE DeriveDataTypeable, PatternGuards #-}
module Tim.Smallpt.Render(
Context(..),
Refl(..),
Sphere(..),
Vec(..),
Work(..),
(|*|),
(|+|),
(|-|),
clamp,
cross,
dot,
line,
makeWork,
norm,
vmult) where
import Control.Applicative
import Control.Monad.State
import Data.Data
import Data.Ord
import Data.List
import Data.Typeable
import Random
data Vec a = Vec a a a
deriving (Data, Typeable)
instance Functor Vec where
fmap f (Vec x y z) = Vec (f x) (f y) (f z)
(|+|) :: Num a => Vec a -> Vec a -> Vec a
(Vec x1 y1 z1) |+| (Vec x2 y2 z2) = Vec (x1 + x2) (y1 + y2) (z1 + z2)
(|-|) :: Num a => Vec a -> Vec a -> Vec a
(Vec x1 y1 z1) |-| (Vec x2 y2 z2) = Vec (x1 - x2) (y1 - y2) (z1 - z2)
(|*|) :: Num a => Vec a -> a -> Vec a
v |*| n = fmap (* n) v
vmult :: Num a => Vec a -> Vec a -> Vec a
(Vec x1 y1 z1) `vmult` (Vec x2 y2 z2) = Vec (x1 * x2) (y1 * y2) (z1 * z2)
norm :: Floating a => Vec a -> Vec a
norm v = let Vec x y z = v in v |*| (1 / sqrt ((x * x) + (y * y) + (z * z)))
dot :: Num a => Vec a -> Vec a -> a
(Vec x1 y1 z1) `dot` (Vec x2 y2 z2) = (x1 * x2) + (y1 * y2) + (z1 * z2)
cross :: Num a => Vec a -> Vec a -> Vec a
(Vec x1 y1 z1) `cross` (Vec x2 y2 z2) = Vec (y1 * z2 - z1 * y2) (z1 * x2 - x1 * z2) (x1 * y2 - y1 * x2)
infixl 6 |+|
infixl 6 |-|
infixl 7 |*|
data Ray a = Ray (Vec a) (Vec a)
data Refl = DIFF
| SPEC
| REFR
deriving (Data, Typeable)
data Sphere a = Sphere { radius :: a,
position :: Vec a,
emission :: Vec a,
colour :: Vec a,
refl :: Refl }
deriving (Data, Typeable)
intersectSphere :: (Floating a, Ord a) => Ray a -> Sphere a -> Maybe a
intersectSphere (Ray o d) s | det < 0 = Nothing
| t > eps = Just t
| t' > eps = Just t'
| otherwise = Nothing
where op = position s |-| o -- Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
eps = 1e-4
b = op `dot` d
det = (b * b) - (op `dot` op) + (radius s * radius s)
det' = sqrt det
t = b - det'
t' = b + det'
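-- Editorial note on the quadratic above: with |d| = 1 and op = p - o, the
-- condition |o + t*d - p|^2 = R^2 expands to
--   t^2 - 2*t*(op `dot` d) + op `dot` op - R^2 = 0,
-- so t = b +/- sqrt(b^2 - op.op + R^2) with b = op `dot` d; the smaller root
-- above eps is preferred, falling back to the larger one.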
maybeMinimumBy :: (a -> a -> Ordering) -> [a] -> Maybe a
maybeMinimumBy _ [] = Nothing
maybeMinimumBy f l = Just (minimumBy f l)
intersectScene :: (Floating a, Ord a) => [Sphere a] -> Ray a -> Maybe (Sphere a, a)
intersectScene scene r = maybeMinimumBy (comparing snd) [(s, t) | (s, Just t) <- map ((,) <*> intersectSphere r) scene]
radiance' :: (Floating a, Ord a, Random a, RandomGen g) => [Sphere a] -> Ray a -> Int -> Sphere a -> a -> State g (Vec a)
radiance' scene r depth obj t | depth >= 5 = return (emission obj) --R.R.
| otherwise = do p' <- State (randomR (0, 1))
if p' >= p
then return (emission obj) --R.R.
else let f = colour obj |*| (1.0 / p) in ((emission obj) |+|) . (f `vmult`) <$> reflect (refl obj)
where Ray raypos raydir = r
x = raypos |+| (raydir |*| t)
n = norm (x |-| position obj)
nl | (n `dot` raydir) < 0 = n
| otherwise = n |*| (-1)
p = let Vec fx fy fz = colour obj in maximum [fx, fy, fz]
reflRay = Ray x (raydir |-| (n |*| (2 * (n `dot` raydir))))
reflect DIFF = let w = nl -- Ideal DIFFUSE reflection
Vec wx _ _ = w
u | abs wx > 0.1 = norm (Vec 0 1 0 `cross` w)
| otherwise = norm (Vec 1 0 0 `cross` w)
v = w `cross` u
in do r1 <- State (randomR (0, 2 * pi))
r2 <- State (randomR (0, 1))
let r2s = sqrt r2
d = norm ((u |*| (cos r1 * r2s)) |+|
(v |*| (sin r1 * r2s)) |+|
(w |*| sqrt (1 - r2)))
radiance scene (Ray x d) (depth + 1)
reflect SPEC = radiance scene reflRay (depth + 1) -- Ideal SPECULAR reflection
reflect REFR | cos2t < 0 = radiance scene reflRay (depth + 1) -- Total internal reflection
| depth >= 2 = do pp' <- State (randomR (0, 1))
if pp' < pp
then (|*| rp) <$> radiance scene reflRay (depth + 1)
else (|*| tp) <$> radiance scene (Ray x tdir) (depth + 1)
| otherwise = do re' <- (|*| re) <$> radiance scene reflRay (depth + 1)
tr' <- (|*| tr) <$> radiance scene (Ray x tdir) (depth + 1)
return (re' |+| tr') -- Ideal dielectric REFRACTION
where into = (n `dot` nl) > 0 -- Ray from outside going in?
nc = 1
nt = 1.5
nnt | into = nc / nt
| otherwise = nt / nc
ddn = raydir `dot` nl
cos2t = 1 - (nnt * nnt * (1 - (ddn * ddn)))
tdir = norm ((raydir |*| nnt) |-| (n |*| ((if into then 1 else (-1)) * (ddn * nnt + sqrt cos2t))))
a = nt - nc
b = nt + nc
r0 = a * a / (b * b)
c | into = 1 + ddn
| otherwise = 1 - tdir `dot` n
re = r0 + ((1 - r0) * c * c * c * c * c)
tr = 1 - re
pp = 0.25 + (0.5 * re)
rp = re / p
tp = tr / (1 - pp)
radiance :: (Floating a, Ord a, Random a, RandomGen g) => [Sphere a] -> Ray a -> Int -> State g (Vec a)
radiance scene r depth | Just (obj, t) <- intersectScene scene r = radiance' scene r depth obj t
| otherwise = return (Vec 0 0 0)
data Context a = Context { ctxw :: Int,
ctxh :: Int,
ctxsamp :: Int,
ctxcx :: Vec a,
ctxcy :: Vec a,
ctxcamdir :: Vec a,
ctxcampos :: Vec a,
ctxscene :: [Sphere a] }
deriving (Data, Typeable)
clamp :: (Num a, Ord a) => a -> a
clamp x | x < 0 = 0
| x > 1 = 1
| otherwise = x
line :: (Floating a, Ord a, Random a) => Context a -> Int -> [Vec a]
line context y = evalState (mapM (pixel . subtract 1) [1..w]) (mkStdGen (y * y * y))
where Context { ctxw = w, ctxh = h, ctxsamp = samp, ctxcx = cx, ctxcy = cy, ctxcamdir = camdir, ctxcampos = campos, ctxscene = scene } = context
pixel x = (|*| 0.25) . foldl1 (|+|) <$> sequence [subpixel x sx sy | sy <- [0 :: Int, 1], sx <- [0 :: Int, 1]]
subpixel x sx sy = fmap clamp . (|*| (1 / fromIntegral samp)) . foldl1 (|+|) <$> replicateM samp (sample x sx sy)
sample x sx sy = do r1 <- State (randomR (0, 4))
r2 <- State (randomR (0, 4))
let dx | r1 < 2 = sqrt r1 - 2
| otherwise = 2 - sqrt (4 - r1)
dy | r2 < 2 = sqrt r2 - 2
| otherwise = 2 - sqrt (4 - r2)
d = (cx |*| ((((fromIntegral sx + 0.5 + dx) / 2 + fromIntegral x) / fromIntegral w) - 0.5)) |+|
(cy |*| ((((fromIntegral sy + 0.5 + dy) / 2 + fromIntegral y) / fromIntegral h) - 0.5)) |+| camdir
ray = Ray (campos |+| (d |*| 140.0)) (norm d)
radiance scene ray 0
data Work a = RenderLine a Int
deriving (Data, Typeable)
makeWork :: Floating a => Int -> Int -> Int -> [Sphere a] -> [Work (Context a)]
makeWork w h samp scene = map (RenderLine context . (h -)) [1..h]
where context = Context { ctxw = w, ctxh = h, ctxsamp = samp, ctxcx = cx, ctxcy = cy, ctxcampos = Vec 50 52 295.6, ctxcamdir = camdir, ctxscene = scene }
camdir = norm (Vec 0 (-0.042612) (-1))
cx = Vec (0.5135 * fromIntegral w / fromIntegral h) 0 0
cy = norm (cx `cross` camdir) |*| 0.5135
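-- Illustrative check (added; not part of the original module): 'norm' yields a
-- unit vector, so its dot product with itself is 1 up to floating-point rounding.
exampleUnitLength :: Double
exampleUnitLength = let v = norm (Vec 3 4 0) in v `dot` v   -- == 1.0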
|
timrobinson/smallpt-haskell
|
Tim/Smallpt/Render.hs
|
Haskell
|
bsd-3-clause
| 10,331
|
{- |
Module : Database.HDBC.PostgreSQL
Copyright : Copyright (C) 2005-2011 John Goerzen
License : BSD3
Maintainer : John Goerzen <jgoerzen@complete.org>
Stability : provisional
Portability: portable
HDBC driver interface for PostgreSQL 8.x
Written by John Goerzen, jgoerzen\@complete.org
/NOTE ON DATES AND TIMES/
The recommended correspondence between PostgreSQL date and time types and HDBC SqlValue
types is:
* SqlLocalDate: DATE
* SqlLocalTimeOfDay: TIME WITHOUT TIME ZONE
* SqlZonedLocalTimeOfDay: TIME WITH TIME ZONE
* SqlLocalTime: TIMESTAMP WITHOUT TIME ZONE
* SqlZonedTime: TIMESTAMP WITH TIME ZONE
* SqlUTCTime: TIMESTAMP WITH TIME ZONE
* SqlDiffTime: INTERVAL
* SqlPOSIXTime: NUMERIC
* SqlEpochTime: INTEGER
* SqlTimeDiff: INTERVAL
Other combinations are possible, and may even be converted automatically.
The above simply represents the types that seem the most logical correspondence,
and thus are tested by the HDBC-PostgreSQL test suite.
-}
module Database.HDBC.PostgreSQL
(
-- * Connecting to Databases
connectPostgreSQL, withPostgreSQL,
connectPostgreSQL', withPostgreSQL',
Connection,
-- * Transactions
begin,
-- * PostgreSQL Error Codes
--
-- |When an @SqlError@ is thrown, the field @seState@ is set to one of the following
-- error codes.
module Database.HDBC.PostgreSQL.ErrorCodes,
-- * Threading
-- $threading
)
where
import Database.HDBC.PostgreSQL.Connection(connectPostgreSQL, withPostgreSQL,
connectPostgreSQL', withPostgreSQL',
begin, Connection())
import Database.HDBC.PostgreSQL.ErrorCodes
{- $threading
Provided the local libpq library is thread-safe, multiple 'Connection's may be used
to have concurrent database queries. Concurrent queries issued on a single
'Connection' will be performed serially.
When the local libpq library is not thread-safe (i.e. it has not been compiled with
--enable-thread-safety), only a single database function will be performed at a time.
-}
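{- Illustrative sketch (added; not part of the original module). Using the
   SqlLocalDate / DATE correspondence recommended in the module documentation
   above, assuming a table @events (happened_on DATE)@ already exists and a
   local server is reachable:

> import Database.HDBC (commit, disconnect, run, toSql)
> import Database.HDBC.PostgreSQL (connectPostgreSQL)
> import Data.Time.Calendar (fromGregorian)
>
> insertEvent :: IO ()
> insertEvent = do
>   conn <- connectPostgreSQL "host=localhost dbname=test"
>   _ <- run conn "INSERT INTO events (happened_on) VALUES (?)"
>            [toSql (fromGregorian 2011 3 14)]
>   commit conn
>   disconnect conn
-}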
|
cabrera/hdbc-postgresql
|
Database/HDBC/PostgreSQL.hs
|
Haskell
|
bsd-3-clause
| 2,143
|
-----------------------------------------------------------------------------
-- |
-- Module : RefacSlicing
-- Copyright : (c) Christopher Brown 2005
--
-- Maintainer : cmb21@kent.ac.uk
-- Stability : provisional
-- Portability : portable
--
-- This module contains a transformation for HaRe.
-- Symbolic Evaluation on tuples.
-- Creates functions which evaluate the expressions
-- within the return value of a function.
-- e.g.
--
-- @ f x y = (x, y) @
--
-- @ f1 x = x @
--
-- @ f2 y = y @
--
-----------------------------------------------------------------------------
module RefacSlicing where
import AbstractIO
import Data.Maybe
import Data.List
import RefacUtils
import RefacRedunDec
import SlicingUtils
data Patt = Match HsMatchP | MyPat HsDeclP | Def [Char]
refacSlicing args
= do let
fileName = args!!0
beginRow = read (args!!1)::Int
beginCol = read (args!!2)::Int
endRow = read (args!!3)::Int
endCol = read (args!!4)::Int
AbstractIO.putStrLn "refacSlicing"
-- Parse the input file.
modInfo@(inscps, exps, mod, tokList) <- parseSourceFile fileName
-- Find the function that's been highlighted as the refactoree
let (loc, pnt, pats, exp, wh, p)
= findDefNameAndExp tokList
(beginRow, beginCol)
(endRow, endCol)
mod
let newExp = locToExp (beginRow, beginCol) (endRow, endCol) tokList mod
let transExp = rewriteExpression exp newExp
if newExp == defaultExp
then do
error "Program slicing can only be performed on an expression."
else do
(wh', newExp') <- doRefac wh transExp
-- ((_,_), (tokList', mod')) <- applyRefac (checkCase exp newExp wh') (Just inscps, exps, mod, tokList) fileName
-- AbstractIO.putStrLn $ show (newExp, wh'')
(_,refWh) <- checkCase exp newExp wh'
-- (mod',((tokList',modified),_))<-doRemovingWhere fileName mod tokList exp newExp' wh'
((_,_), (tokList', mod')) <- applyRefac (doRemovingWhere exp newExp' refWh) (Just (inscps, exps, mod, tokList)) fileName
((_,m), (tokList'', mod'')) <- applyRefac (doRemoving1 exp newExp' wh) (Just (inscps, exps, mod', tokList')) fileName
-- ((_,_), (newTokList, newMod)) <- applyRefac (doTranslation exp transExp) (Just (inscps, exps, mod'', tokList'')) fileName
-- AbstractIO.putStrLn $ show tokList''
writeRefactoredFiles False [((fileName, True), (tokList'', mod''))]
AbstractIO.putStrLn "Completed.\n"
doTranslation e nT (_,_,mod)
= do
newMod <- update e nT mod
return newMod
sliceSubExp p old exp wh loc pnt pats (_,_, mod) = do
(decls, newExp) <- removeRedun wh exp
mod' <- updating p mod loc pnt pats newExp decls
return mod'
changeName newName (PNT (PN (UnQual _) (G modName _ optSrc)) Value s)
= PNT (PN (UnQual newName) (G modName newName optSrc)) Value s
updating (match@(Match x)) mod loc pnt pats rhs ds = do
mod' <- update x (newMatch loc pnt pats rhs ds) mod
return mod'
updating (pat@(MyPat x)) mod loc pnt pats rhs ds = do
mod' <- update x (newDecl loc pnt pats rhs ds) mod
return mod'
newMatch loc pnt pats rhs ds = HsMatch loc pnt pats (HsBody rhs) ds
newDecl loc pnt pats rhs ds = Dec (HsFunBind loc [HsMatch loc pnt pats (HsBody rhs) ds] )
checkFreeInWhere :: [PName] -> [HsDeclP] -> [HsDeclP]
checkFreeInWhere [] _ = []
checkFreeInWhere _ [] = []
checkFreeInWhere (p:ps) list = (checkSinInWhere p list) ++ (checkFreeInWhere ps list)
where
checkSinInWhere :: PName -> [HsDeclP] -> [HsDeclP]
checkSinInWhere p [] = []
checkSinInWhere p (x:xs)
| defines p x = [x]
| otherwise = checkSinInWhere p xs
rewriteExpression :: HsExpP -> HsExpP -> HsExpP
rewriteExpression e@(Exp (HsInfixApp e1 o e2)) newExp
| findEntity newExp e1 = (rewriteExpression e1 newExp)
| findEntity newExp e2 = (rewriteExpression e2 newExp)
| otherwise = e
rewriteExpression (Exp (HsLet ds e)) newExp = (Exp (HsLet ds (rewriteExpression e newExp)))
rewriteExpression (Exp (HsLambda ds e)) newExp = (Exp (HsLambda ds newExp))
rewriteExpression (Exp (HsParen e1)) newExp = rewriteExpression e1 newExp
rewriteExpression e1 e2 = e2
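-- Added note: 'rewriteExpression' narrows the highlighted expression towards the
-- selected sub-expression 'newExp'; it descends through infix applications and
-- parentheses into the operand that contains 'newExp', keeps enclosing 'let'
-- bindings and lambda binders, and for any other shape simply returns 'newExp'.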
{-|
Takes the position of the highlighted code and returns
the function name, the list of arguments, the expression that has been
highlighted by the user, and any where\/let clauses associated with the
function.
-}
{-findDefNameAndExp :: Term t => [PosToken] -- ^ The token stream for the
-- file to be
-- refactored.
-> (Int, Int) -- ^ The beginning position of the highlighting.
-> (Int, Int) -- ^ The end position of the highlighting.
-> t -- ^ The abstract syntax tree.
-> (SrcLoc, PNT, FunctionPats, HsExpP, WhereDecls) -- ^ A tuple of,
-- (the function name, the list of arguments,
-- the expression highlighted, any where\/let clauses
-- associated with the function).
-}
findDefNameAndExp toks beginPos endPos t
= fromMaybe ([], defaultPNT, [], defaultExp, [], Def [])
(applyTU (once_tdTU (failTU `adhocTU` inMatch `adhocTU` inPat)) t)
where
--The selected sub-expression is in the rhs of a match
inMatch (match@(HsMatch loc1 pnt pats (rhs@(HsBody e)) ds)::HsMatchP)
| locToExp beginPos endPos toks rhs /= defaultExp
= Just ([loc1], pnt, pats, e, ds, (Match match))
inMatch _ = Nothing
--The selected sub-expression is in the rhs of a pattern-binding
inPat (pat@(Dec (HsPatBind loc1 ps (rhs@(HsBody e)) ds))::HsDeclP)
| locToExp beginPos endPos toks rhs /= defaultExp
= if isSimplePatBind pat
then Just ([loc1], patToPNT ps, [], e, ds, (MyPat pat))
else error "A complex pattern binding can not be generalised!"
inPat _ = Nothing
|
kmate/HaRe
|
old/refactorer/RefacSlicing.hs
|
Haskell
|
bsd-3-clause
| 6,790
|
module Poly4 () where
import Language.Haskell.Liquid.Prelude
x = choose 0
baz y = y
prop = liquidAssertB (baz True)
|
abakst/liquidhaskell
|
tests/pos/poly4.hs
|
Haskell
|
bsd-3-clause
| 125
|
module Test10 where
f x = x + y where y = 37
g = 1 + 37
|
SAdams601/HaRe
|
old/testing/refacFunDef/Test10_AstOut.hs
|
Haskell
|
bsd-3-clause
| 59
|
{-# LANGUAGE TemplateHaskell #-}
-- test the representation of guards
module Main
where
$(
[d|
foo :: Int -> Int
foo x
| x == 5 = 6
foo x = 7
|]
)
$(
[d|
bar :: Maybe Int -> Int
bar x
| Just y <- x = y
bar _ = 9
|]
)
main :: IO ()
main = do putStrLn $ show $ foo 5
putStrLn $ show $ foo 8
putStrLn $ show $ bar (Just 2)
putStrLn $ show $ bar Nothing
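-- Added note on expected behaviour: the boolean guard selects 6 for @foo 5@ and the
-- fall-through clause gives 7 for @foo 8@; the pattern guard binds 2 for
-- @bar (Just 2)@ and the fall-through clause gives 9 for @bar Nothing@.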
|
danse/ghcjs
|
test/ghc/th/tH_repGuardOutput.hs
|
Haskell
|
mit
| 484
|
-- There was a lot of discussion about various ways of computing
-- Bernouilli numbers (whatever they are) on haskell-cafe in March 2003
-- Here's one of the programs.
-- It's not a very good test, I suspect, because it manipulates big integers,
-- and so probably spends most of its time in GMP.
import Data.Ratio
import System.Environment
-- powers = [[r^n | r <- [2..]] | n <- [1..]]
-- type signature required for compilers lacking the monomorphism restriction
powers :: [[Integer]]
powers = [2..] : map (zipWith (*) (head powers)) powers
-- powers = [[(-1)^r * r^n | r <- [2..]] | n <- [1..]]
-- type signature required for compilers lacking the monomorphism restriction
neg_powers :: [[Integer]]
neg_powers =
map (zipWith (\n x -> if n then x else -x) (iterate not True)) powers
pascal:: [[Integer]]
pascal = [1,2,1] : map (\line -> zipWith (+) (line++[0]) (0:line)) pascal
bernoulli 0 = 1
bernoulli 1 = -(1%2)
bernoulli n | odd n = 0
bernoulli n =
(-1)%2
+ sum [ fromIntegral ((sum $ zipWith (*) powers (tail $ tail combs)) -
fromIntegral k) %
fromIntegral (k+1)
| (k,combs)<- zip [2..n] pascal]
where powers = (neg_powers!!(n-1))
main = do
[arg] <- getArgs
let n = (read arg)::Int
putStr $ "Bernoulli of " ++ (show n) ++ " is "
print (bernoulli n)
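-- Sanity check (added; not part of the original benchmark): the first few
-- Bernoulli numbers produced by 'bernoulli' above.
exampleBernoullis :: [Rational]
exampleBernoullis = map bernoulli [0, 1, 2, 4]   -- [1 % 1, (-1) % 2, 1 % 6, (-1) % 30]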
|
beni55/ghcjs
|
test/nofib/imaginary/bernouilli/Main.hs
|
Haskell
|
mit
| 1,320
|
{-# LANGUAGE RankNTypes #-}
module T9196 where
f :: (forall a. Eq a) => a -> a
f x = x
g :: (Eq a => Ord a) => a -> a
g x = x
|
forked-upstream-packages-for-ghcjs/ghc
|
testsuite/tests/typecheck/should_fail/T9196.hs
|
Haskell
|
bsd-3-clause
| 128
|
module Channel where
import qualified Data.ByteString as Bs
import qualified Data.Set as S
import qualified Control.Concurrent as C (ThreadId)
import qualified Network.Socket as So hiding (send, sendTo, recv, recvFrom)
-- | Holds the configuration of a channel.
data ChannelConfig = ChannelConfig {
socket :: So.Socket,
-- ^ The UDP Socket from Network.Socket that the channel will use to send and receive
-- messages.
resendTimeout :: Integer,
  -- ^ Picoseconds after which a package is re-sent if no ACK for it has been received.
maxResends :: Int,
  -- ^ Times that the same package can be re-sent without an ACK before it is
  -- considered lost.
allowed :: So.SockAddr -> IO(Bool),
  -- ^ Function used to determine whether to accept incoming packages from the given
  -- address.
maxPacketSize :: Int,
  -- ^ Max bytes that can be sent in a single package on this channel; larger packages
  -- will throw an exception.
recvRetention :: Integer
  -- ^ Time that a received and delivered package will remain in memory in order to avoid
  -- duplicated receptions.
  -- The packages will be stored for @recvRetention * resendTimeout * maxResends@ picoseconds
  -- after reception and, after that, will be freed on the next getReceived call.
}
data ChannelStatus = ChannelStatus {
nextId :: !Int,
sentMsgs :: !(S.Set Message),
unsentMsgs :: !(S.Set Message),
recvMsgs :: !(S.Set Message),
deliveredMsgs :: !(S.Set Message),
receivingThread :: !C.ThreadId,
sendingThread :: !C.ThreadId,
closed :: !Bool
} deriving (Show)
data Message = Message {
msgId :: !Int,
address :: !So.SockAddr,
string :: !Bs.ByteString,
lastSend :: !Integer, -- or reception time in case of incomming messages.
resends :: !Int
} deriving (Show)
instance Eq Message where
(==) m1 m2 = msgId m1 == msgId m2 && address m1 == address m2
instance Ord Message where
compare m1 m2 = compare (msgId m1, address m1) (msgId m2, address m2)
emptyChannel :: C.ThreadId -> C.ThreadId -> ChannelStatus
emptyChannel rtid stid = ChannelStatus 0 S.empty S.empty S.empty S.empty rtid stid False
receiveMsg :: Message -> ChannelStatus -> ChannelStatus
-- ^ Queues a message that has been received.
receiveMsg msg chst =
if S.notMember msg (deliveredMsgs chst) then
chst {recvMsgs = S.insert msg (recvMsgs chst)}
else chst
registerACK :: So.SockAddr -> Int -> ChannelStatus -> ChannelStatus
-- ^ Informs the ChannelStatus that it no longer needs to store the package from the address with
-- the given id, since it was ACKed by the remote host.
registerACK addr mId chst = chst {
sentMsgs = S.delete (Message mId addr Bs.empty 0 0) (sentMsgs chst)
}
queueMsg :: (So.SockAddr,Bs.ByteString) -> ChannelStatus -> ChannelStatus
-- ^ Puts a new message to be sent on the ChannelStatus.
queueMsg (addr,str) chst = chst {
nextId = max 0 $ (nextId chst) + 1,
sentMsgs = S.insert (Message (nextId chst) addr str 0 0) (sentMsgs chst)
}
nextForSending :: ChannelConfig -> Integer -> ChannelStatus -> (S.Set Message, ChannelStatus)
-- ^ Receives the current CPUTime and a ChannelStatus, returns the messages to be sent and updates
-- the ChannelStatus, assuming that they will be sent.
nextForSending chcfg time chst = let
touted = S.filter (\m -> time >= (lastSend m) + (resendTimeout chcfg)) (sentMsgs chst)
touted' = S.map (\m -> m {lastSend = time, resends = resends m + 1}) touted
(ready,failed) = S.partition (\m -> resends m <= maxResends chcfg) touted'
updatedMsgs = S.union ready $ S.difference (sentMsgs chst) failed
chst' = chst {sentMsgs = updatedMsgs, unsentMsgs = S.union failed (unsentMsgs chst)}
in seq touted' $ (ready,chst')
nextForDeliver :: ChannelConfig -> Integer -> ChannelStatus -> (S.Set Message, ChannelStatus)
-- ^ Receives the current CPUTime and a ChannelStatus, returns the messages that can be delivered
-- and cleans the old ones that were retained.
nextForDeliver chcfg time chst = let
retenTime = recvRetention chcfg * resendTimeout chcfg * fromIntegral (maxResends chcfg)
survives m = time <= lastSend m + retenTime
newDelivered = S.difference (S.filter survives (deliveredMsgs chst)) (recvMsgs chst)
in (recvMsgs chst, chst {deliveredMsgs = newDelivered, recvMsgs = S.empty})
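-- Illustrative sketch (added; not part of the original module): queueing one
-- outgoing message and then dropping it once an ACK with its id arrives. The
-- address and payload below are arbitrary placeholders.
exampleAckFlow :: ChannelStatus -> ChannelStatus
exampleAckFlow chst =
  let addr  = So.SockAddrInet 9000 0                 -- placeholder peer address
      mId   = nextId chst                            -- id that queueMsg will assign
      chst' = queueMsg (addr, Bs.pack [104, 105]) chst
  in  registerACK addr mId chst'                     -- removes it from sentMsgs again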
|
Autopawn/haskell-secureUDP
|
Channel.hs
|
Haskell
|
mit
| 4,347
|
module GroupCreator.Evaluation
( Condition(..)
, fitness
) where
import GroupCreator.Groupings
import GroupCreator.People
import Data.List
import Data.Ord
import Data.Map
import Data.Hash (hash, asWord64)
data Condition = SizeRestriction { groupSize :: Int }
--make groups of this many people
| Friends { person :: Int, friend :: Int }
--these two want to be in the same group
| Enemies { person :: Int, enemy :: Int }
--these two don't want to be in the same group
| Peers { attributeIndex :: Int }
--if attribute is "sex", groups should try to be either all-males or all-females (only for ENUM attributes)
| Mixed { attributeIndex :: Int }
--if attribute is "sex", groups should try to be mixed (3 M 2 F is better than 4 M 1 F)
deriving (Show)
fitness :: [Condition] -> People -> Grouping -> Double
fitness conditions people grouping = run 0 conditions people grouping
where
run conditionIndex [] people (Grouping grouping) = fromIntegral (asWord64 $ hash grouping) / 1e25
run conditionIndex (cond:conds) people (Grouping grouping) = (1 + conditionIndex) * (eval cond people grouping) + (run (conditionIndex + 1) conds people (Grouping grouping))
-- The higher the number, the worse the result of this evaluation
eval :: Condition -> People -> [[Int]] -> Double
eval (SizeRestriction groupSize) people [] = 0
eval (SizeRestriction groupSize) people (group:groups) = 0.5 * fromIntegral (abs (length group - groupSize)) + (eval (SizeRestriction groupSize) people groups)
eval (Friends person friend) people [] = 0
eval (Friends person friend) people (group:groups)
| intersection == 2 = 0 --person and friend are in the same group
| intersection == 1 = 5 --either person or friend are alone in the group
| otherwise = eval (Friends person friend) people groups
where
intersection = length $ intersect group [person, friend]
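  -- Worked example (added note): with @Friends 1 2@, a group @[1,2,5]@ scores 0
  -- (both members present), @[1,6,7]@ scores 5 (only one of them present), and a
  -- group containing neither simply defers to the remaining groups.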
eval (Enemies person enemy) people grouping = 5 - eval (Friends person enemy) people grouping
eval (Peers attributeIndex) people [] = 0
eval (Peers attributeIndex) people (firstGroup:groups) = (theRest / majorityCount) + (eval (Peers attributeIndex) people groups)
where
groupAttributeValues = Data.List.map (enumValue . (! attributeIndex)) $ Data.List.map (people !) firstGroup
majorityCount = fromIntegral $ length $ head . sortBy (flip $ comparing length) . group . sort $ groupAttributeValues
theRest = fromIntegral (length firstGroup) - majorityCount
|
cambraca/group-creator
|
GroupCreator/Evaluation.hs
|
Haskell
|
mit
| 2,580
|
{-# htermination index :: Ix a => (a,a) -> a -> Int #-}
|
ComputationWithBoundedResources/ara-inference
|
doc/tpdb_trs/Haskell/full_haskell/Prelude_index_1.hs
|
Haskell
|
mit
| 56
|
module Language.Lua.AST where
import Text.Parsec (SourcePos)
import Language.Lua.Symbol (Intrinsic)
data LuaProgram = LuaProgram FilePath Block deriving(Eq, Ord, Show)
data Block = Block [Statement] (Maybe LastStatement) deriving(Eq, Ord, Show)
data Function = Function SourcePos [Name] Block SourcePos deriving(Eq, Ord, Show)
data Statement =
Assign SourcePos [Exp] [Exp]
|LocalDef SourcePos [Name] [Exp]
|Do SourcePos Block
|If SourcePos Exp Block (Maybe Statement)
|For SourcePos Name Exp Exp (Maybe Exp) Block
|ForEach SourcePos [Name] [Exp] Block
|While SourcePos Exp Block
|Repeat SourcePos Block Exp
|StatementCall FunctionCall
|LocalFunctionDef SourcePos Name Function
|FunctionDef SourcePos Exp Function
|FunctionDefSelf SourcePos Exp Name Function
|EmptyStatement SourcePos
deriving(Eq, Ord, Show)
type Name = (SourcePos, String)
data LastStatement =
Return SourcePos [Exp]
|Break SourcePos
deriving(Eq, Ord, Show)
data FunctionCall =
Call SourcePos Exp [Exp]
|CallSelf SourcePos Exp Name [Exp]
deriving(Eq, Ord, Show)
data Exp =
Literal SourcePos TypedValue
|Fetch Name
|Index SourcePos Exp Exp
|Parens Exp
|Dot SourcePos Exp Name
|ExpCall FunctionCall
|Binop SourcePos Intrinsic Exp Exp
|Unop SourcePos Intrinsic Exp
|Nop
deriving(Eq, Ord, Show)
data TypedValue =
TypedString { typedStringSyntax :: StringSyntax, typedStringValue :: String }
|TypedInt Integer
|TypedReal Double
|TypedBool Bool
|TypedTable [TableItem]
|TypedVararg
|TypedFunction Function
|TypedUserdata
|TypedThread
|TypedNil
deriving(Eq, Ord, Show)
data StringSyntax = DoubleQuoted | SingleQuoted | MultiLined deriving (Eq, Ord, Show)
data TableItem =
TableItemValue SourcePos Exp
|TableItemKeyValue SourcePos String Exp
|TableItemValueValue SourcePos Exp Exp
deriving(Eq, Ord, Show)
data TableItemSyntax = TableItemSyntaxValue | TableItemSyntaxKeyValue | TableItemSyntaxValueValue deriving(Eq, Ord, Show)
-- XXX: uncomfortable
getTableItemSyntax (TableItemValue _ _) = TableItemSyntaxValue
getTableItemSyntax (TableItemKeyValue _ _ _) = TableItemSyntaxKeyValue
getTableItemSyntax (TableItemValueValue _ _ _) = TableItemSyntaxValueValue
|
ykst/llint
|
Language/Lua/AST.hs
|
Haskell
|
mit
| 2,282
|
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE TypeSynonymInstances #-}
-- | This module enables debugging all 'ByteString' to 'Text' to 'String' conversions.
-- This is an internal module.
--
-- @since 0.5.67
module B9.Text
( Text,
LazyText,
ByteString,
LazyByteString,
Textual (..),
writeTextFile,
unsafeRenderToText,
unsafeParseFromText,
parseFromTextWithErrorMessage,
encodeAsUtf8LazyByteString,
)
where
import Control.Exception (displayException)
-- import qualified Data.ByteString as Strict
-- import qualified Data.Text.Encoding.Error as Text
import Control.Monad.IO.Class
import Data.ByteString (ByteString)
import qualified Data.ByteString.Lazy as LazyByteString
import qualified Data.Text as Text
import Data.Text (Text)
import qualified Data.Text.Encoding as Text
import qualified Data.Text.IO as Text
import qualified Data.Text.Lazy as LazyText
import qualified Data.Text.Lazy.Encoding as LazyText
import GHC.Stack
-- | Lazy byte strings.
--
-- A type alias to 'Lazy.ByteString' that can be used everywhere such that
-- references don't need to be qualified with the complete module name everywhere.
--
-- @since 0.5.67
type LazyByteString = LazyByteString.ByteString
-- | Lazy texts.
--
-- A type alias to 'LazyText.Text' that can be used everywhere such that
-- references don't need to be qualified with the complete module name everywhere.
--
-- @since 0.5.67
type LazyText = LazyText.Text
-- | A class for values that can be converted to/from 'Text'.
--
-- @since 0.5.67
class Textual a where
  -- | Convert a value to 'Text'.
  -- If an error occurred, return 'Left' with the error message.
--
-- @since 0.5.67
renderToText :: HasCallStack => a -> Either String Text
  -- | Parse a value from 'Text'.
--
-- @since 0.5.67
parseFromText :: HasCallStack => Text -> Either String a
instance Textual Text where
renderToText = Right
parseFromText = Right
instance Textual String where
renderToText = Right . Text.pack
parseFromText = Right . Text.unpack
-- | Convert a UTF-8 encoded 'ByteString' to 'Text'
--
-- @since 0.5.67
instance Textual ByteString where
renderToText x = case Text.decodeUtf8' x of
Left u ->
Left
( "renderToText of the ByteString failed: "
++ displayException u
++ " "
++ show x
++ "\nat:\n"
++ prettyCallStack callStack
)
Right t -> Right t
parseFromText = Right . Text.encodeUtf8
-- | Convert a UTF-8 encoded 'LazyByteString' to 'Text'
--
-- @since 0.5.67
instance Textual LazyByteString where
renderToText x = case LazyText.decodeUtf8' x of
Left u ->
Left
( "renderToText of the LazyByteString failed: "
++ displayException u
++ " "
++ show x
++ "\nat:\n"
++ prettyCallStack callStack
)
Right t -> Right (LazyText.toStrict t)
parseFromText = Right . LazyByteString.fromStrict . Text.encodeUtf8
-- | Render a 'Text' to a file.
--
-- @since 0.5.67
writeTextFile :: (HasCallStack, MonadIO m) => FilePath -> Text -> m ()
writeTextFile f = liftIO . Text.writeFile f
-- | Render a 'Text' via 'renderToText' and throw a runtime exception when rendering fails.
--
-- @since 0.5.67
unsafeRenderToText :: (Textual a, HasCallStack) => a -> Text
unsafeRenderToText = either error id . renderToText
-- | Parse a 'Text' via 'parseFromText' and throw a runtime exception when parsing fails.
--
-- @since 0.5.67
unsafeParseFromText :: (Textual a, HasCallStack) => Text -> a
unsafeParseFromText = either error id . parseFromText
-- | Encode a 'String' as UTF-8 encoded into a 'LazyByteString'.
--
-- @since 0.5.67
encodeAsUtf8LazyByteString :: HasCallStack => String -> LazyByteString
encodeAsUtf8LazyByteString =
LazyByteString.fromStrict . Text.encodeUtf8 . Text.pack
-- | Parse the given 'Text'.
-- Return @Left errorMessage@ or @Right a@; on failure, the supplied context string
-- is prepended to the parser's error message.
--
-- @since 0.5.67
parseFromTextWithErrorMessage ::
(HasCallStack, Textual a) =>
-- | An arbitrary string for error messages
String ->
Text ->
Either String a
parseFromTextWithErrorMessage errorMessage b = case parseFromText b of
Left e -> Left (unwords [errorMessage, e])
Right a -> Right a
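-- Minimal usage sketch (added; not part of the original module): round-tripping a
-- 'String' through 'Text' with the 'Textual' instances defined above.
exampleRoundTrip :: Either String String
exampleRoundTrip = parseFromText =<< renderToText ("hello, world" :: String)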
|
sheyll/b9-vm-image-builder
|
src/lib/B9/Text.hs
|
Haskell
|
mit
| 4,306
|
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ApplicativeDo #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE PartialTypeSignatures #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE EmptyCase #-}
module Unison.Codebase.Editor.HandleInput
( loop
, loopState0
, LoopState(..)
, currentPath
, parseSearchType
)
where
import Unison.Prelude
-- TODO: Don't import backend
import qualified Unison.Server.Backend as Backend
import Unison.Server.QueryResult
import Unison.Server.Backend (ShallowListEntry(..), TermEntry(..), TypeEntry(..))
import qualified Unison.Codebase.MainTerm as MainTerm
import Unison.Codebase.Editor.Command as Command
import Unison.Codebase.Editor.Input
import Unison.Codebase.Editor.Output
import Unison.Codebase.Editor.DisplayObject
import qualified Unison.Codebase.Editor.Output as Output
import Unison.Codebase.Editor.SlurpResult (SlurpResult(..))
import qualified Unison.Codebase.Editor.SlurpResult as Slurp
import Unison.Codebase.Editor.SlurpComponent (SlurpComponent(..))
import qualified Unison.Codebase.Editor.SlurpComponent as SC
import Unison.Codebase.Editor.RemoteRepo (printNamespace, WriteRemotePath, writeToRead, writePathToRead)
import qualified Unison.CommandLine.InputPattern as InputPattern
import qualified Unison.CommandLine.InputPatterns as InputPatterns
import Control.Lens
import Control.Monad.State ( StateT )
import qualified Control.Monad.State as State
import Control.Monad.Except ( ExceptT(..), runExceptT, withExceptT)
import Data.Bifunctor ( second, first )
import Data.Configurator ()
import qualified Data.Foldable as Foldable
import qualified Data.List as List
import Data.List.Extra ( nubOrd )
import qualified Data.Map as Map
import qualified Data.Text as Text
import qualified Text.Megaparsec as P
import qualified Data.Set as Set
import Data.Sequence ( Seq(..) )
import qualified Unison.ABT as ABT
import qualified Unison.Codebase.BranchDiff as BranchDiff
import qualified Unison.Codebase.Editor.Output.BranchDiff as OBranchDiff
import Unison.Codebase.Branch ( Branch(..)
, Branch0(..)
)
import qualified Unison.Codebase.Branch as Branch
import qualified Unison.Codebase.BranchUtil as BranchUtil
import qualified Unison.Codebase.Causal as Causal
import qualified Unison.Codebase.Editor.Output.DumpNamespace as Output.DN
import qualified Unison.Codebase.Metadata as Metadata
import Unison.Codebase.Patch ( Patch(..) )
import qualified Unison.Codebase.Patch as Patch
import Unison.Codebase.Path ( Path
, Path'(..) )
import qualified Unison.Codebase.Path as Path
import qualified Unison.Codebase.Reflog as Reflog
import Unison.Server.SearchResult ( SearchResult )
import qualified Unison.Server.SearchResult as SR
import qualified Unison.Server.SearchResult' as SR'
import qualified Unison.Codebase.ShortBranchHash as SBH
import qualified Unison.Codebase.SyncMode as SyncMode
import qualified Unison.Builtin.Decls as DD
import qualified Unison.Runtime.IOSource as DD
import qualified Unison.DataDeclaration as DD
import qualified Unison.HashQualified as HQ
import qualified Unison.HashQualified' as HQ'
import qualified Unison.Name as Name
import Unison.Name ( Name )
import Unison.Names3 ( Names(..), Names0
, pattern Names0 )
import qualified Unison.Names2 as Names
import qualified Unison.Names3 as Names3
import Unison.Parser ( Ann(..) )
import Unison.Reference ( Reference(..) )
import qualified Unison.Reference as Reference
import Unison.Referent ( Referent )
import qualified Unison.Referent as Referent
import Unison.Result ( pattern Result )
import qualified Unison.ShortHash as SH
import Unison.Term (Term)
import qualified Unison.Term as Term
import qualified Unison.Type as Type
import qualified Unison.Result as Result
import qualified Unison.UnisonFile as UF
import qualified Unison.Util.Find as Find
import Unison.Util.Free ( Free )
import qualified Unison.Util.Free as Free
import Unison.Util.List ( uniqueBy )
import qualified Unison.Util.Relation as R
import qualified Unison.Util.Relation4 as R4
import U.Util.Timing (unsafeTime)
import Unison.Util.TransitiveClosure (transitiveClosure)
import Unison.Var ( Var )
import qualified Unison.Var as Var
import qualified Unison.Codebase.TypeEdit as TypeEdit
import Unison.Codebase.TermEdit (TermEdit(..))
import qualified Unison.Codebase.TermEdit as TermEdit
import qualified Unison.Typechecker as Typechecker
import qualified Unison.PrettyPrintEnv as PPE
import Unison.Runtime.IOSource ( isTest )
import qualified Unison.Runtime.IOSource as IOSource
import qualified Unison.Util.Monoid as Monoid
import Unison.UnisonFile (TypecheckedUnisonFile)
import qualified Unison.Codebase.Editor.TodoOutput as TO
import qualified Unison.Lexer as L
import qualified Unison.LabeledDependency as LD
import Unison.LabeledDependency (LabeledDependency)
import Unison.Type (Type)
import qualified Unison.Builtin as Builtin
import qualified Unison.Builtin.Terms as Builtin
import Unison.NameSegment (NameSegment(..))
import qualified Unison.NameSegment as NameSegment
import Unison.Codebase.ShortBranchHash (ShortBranchHash)
import qualified Unison.Codebase.Editor.Propagate as Propagate
import qualified Unison.Codebase.Editor.UriParser as UriParser
import Data.Tuple.Extra (uncurry3)
import qualified Unison.CommandLine.DisplayValues as DisplayValues
import qualified Control.Error.Util as ErrorUtil
import Unison.Util.Monoid (intercalateMap)
import qualified Unison.Util.Star3 as Star3
import qualified Unison.Util.Pretty as P
import qualified Unison.Util.Relation as Relation
import Data.List.NonEmpty (NonEmpty)
import qualified Data.List.NonEmpty as Nel
import Unison.Codebase.Editor.AuthorInfo (AuthorInfo(..))
type F m i v = Free (Command m i v)
-- type (Action m i v) a
type Action m i v = MaybeT (StateT (LoopState m v) (F m i v))
data LoopState m v
= LoopState
{ _root :: Branch m
, _lastSavedRoot :: Branch m
-- the current position in the namespace
, _currentPathStack :: NonEmpty Path.Absolute
-- TBD
-- , _activeEdits :: Set Branch.EditGuid
-- The file name last modified, and whether to skip the next file
-- change event for that path (we skip file changes if the file has
-- just been modified programmatically)
, _latestFile :: Maybe (FilePath, SkipNextUpdate)
, _latestTypecheckedFile :: Maybe (UF.TypecheckedUnisonFile v Ann)
-- The previous user input. Used to request confirmation of
-- questionable user commands.
, _lastInput :: Maybe Input
-- A 1-indexed list of strings that can be referenced by index at the
-- CLI prompt. e.g. Given ["Foo.bat", "Foo.cat"],
-- `rename 2 Foo.foo` will rename `Foo.cat` to `Foo.foo`.
, _numberedArgs :: NumberedArgs
}
type SkipNextUpdate = Bool
type InputDescription = Text
makeLenses ''LoopState
-- replacing the old read/write scalar Lens with "peek" Getter for the NonEmpty
currentPath :: Getter (LoopState m v) Path.Absolute
currentPath = currentPathStack . to Nel.head
loopState0 :: Branch m -> Path.Absolute -> LoopState m v
loopState0 b p = LoopState b b (pure p) Nothing Nothing Nothing []
type Action' m v = Action m (Either Event Input) v
defaultPatchNameSegment :: NameSegment
defaultPatchNameSegment = "patch"
prettyPrintEnvDecl :: Names -> Action' m v PPE.PrettyPrintEnvDecl
prettyPrintEnvDecl ns = eval CodebaseHashLength <&> (`PPE.fromNamesDecl` ns)
loop :: forall m v . (Monad m, Var v) => Action m (Either Event Input) v ()
loop = do
uf <- use latestTypecheckedFile
root' <- use root
currentPath' <- use currentPath
latestFile' <- use latestFile
currentBranch' <- getAt currentPath'
e <- eval Input
hqLength <- eval CodebaseHashLength
sbhLength <- eval BranchHashLength
let
currentPath'' = Path.unabsolute currentPath'
hqNameQuery q = eval $ HQNameQuery (Just currentPath'') root' q
sbh = SBH.fromHash sbhLength
root0 = Branch.head root'
currentBranch0 = Branch.head currentBranch'
defaultPatchPath :: PatchPath
defaultPatchPath = (Path' $ Left currentPath', defaultPatchNameSegment)
resolveSplit' :: (Path', a) -> (Path, a)
resolveSplit' = Path.fromAbsoluteSplit . Path.toAbsoluteSplit currentPath'
resolveToAbsolute :: Path' -> Path.Absolute
resolveToAbsolute = Path.resolve currentPath'
getAtSplit :: Path.Split -> Maybe (Branch m)
getAtSplit p = BranchUtil.getBranch p root0
getAtSplit' :: Path.Split' -> Maybe (Branch m)
getAtSplit' = getAtSplit . resolveSplit'
getPatchAtSplit' :: Path.Split' -> Action' m v (Maybe Patch)
getPatchAtSplit' s = do
let (p, seg) = Path.toAbsoluteSplit currentPath' s
b <- getAt p
eval . Eval $ Branch.getMaybePatch seg (Branch.head b)
getHQ'TermsIncludingHistorical p =
getTermsIncludingHistorical (resolveSplit' p) root0
getHQ'Terms :: Path.HQSplit' -> Set Referent
getHQ'Terms p = BranchUtil.getTerm (resolveSplit' p) root0
getHQ'Types :: Path.HQSplit' -> Set Reference
getHQ'Types p = BranchUtil.getType (resolveSplit' p) root0
getHQTerms :: HQ.HashQualified Name -> Action' m v (Set Referent)
getHQTerms hq = case hq of
HQ.NameOnly n -> let
-- absolute-ify the name, then lookup in deepTerms of root
path :: Path.Path'
path = Path.fromName' n
Path.Absolute absPath = resolveToAbsolute path
in pure $ R.lookupRan (Path.toName absPath) (Branch.deepTerms root0)
HQ.HashOnly sh -> hashOnly sh
HQ.HashQualified _ sh -> hashOnly sh
where
hashOnly sh = eval $ TermReferentsByShortHash sh
basicPrettyPrintNames0 =
Backend.basicPrettyPrintNames0 root' (Path.unabsolute currentPath')
resolveHHQS'Types :: HashOrHQSplit' -> Action' m v (Set Reference)
resolveHHQS'Types = either
(eval . TypeReferencesByShortHash)
(pure . getHQ'Types)
-- Term Refs and Cons
resolveHHQS'Referents = either
(eval . TermReferentsByShortHash)
(pure . getHQ'Terms)
getTypes :: Path.Split' -> Set Reference
getTypes = getHQ'Types . fmap HQ'.NameOnly
getTerms :: Path.Split' -> Set Referent
getTerms = getHQ'Terms . fmap HQ'.NameOnly
getPatchAt :: Path.Split' -> Action' m v Patch
getPatchAt patchPath' = do
let (p, seg) = Path.toAbsoluteSplit currentPath' patchPath'
b <- getAt p
eval . Eval $ Branch.getPatch seg (Branch.head b)
withFile ambient sourceName lexed@(text, tokens) k = do
let
getHQ = \case
L.Backticks s (Just sh) ->
Just (HQ.HashQualified (Name.unsafeFromString s) sh)
L.WordyId s (Just sh) ->
Just (HQ.HashQualified (Name.unsafeFromString s) sh)
L.SymbolyId s (Just sh) ->
Just (HQ.HashQualified (Name.unsafeFromString s) sh)
L.Hash sh -> Just (HQ.HashOnly sh)
_ -> Nothing
hqs = Set.fromList . mapMaybe (getHQ . L.payload) $ tokens
let parseNames = Backend.getCurrentParseNames currentPath'' root'
latestFile .= Just (Text.unpack sourceName, False)
latestTypecheckedFile .= Nothing
Result notes r <- eval $ Typecheck ambient parseNames sourceName lexed
case r of
-- Parsing failed
Nothing -> respond $
ParseErrors text [ err | Result.Parsing err <- toList notes ]
Just (Left errNames) -> do
ns <- makeShadowedPrintNamesFromHQ hqs errNames
ppe <- suffixifiedPPE ns
let tes = [ err | Result.TypeError err <- toList notes ]
cbs = [ bug
| Result.CompilerBug (Result.TypecheckerBug bug)
<- toList notes
]
when (not $ null tes) . respond $ TypeErrors text ppe tes
when (not $ null cbs) . respond $ CompilerBugs text ppe cbs
Just (Right uf) -> k uf
loadUnisonFile sourceName text = do
let lexed = L.lexer (Text.unpack sourceName) (Text.unpack text)
withFile [] sourceName (text, lexed) $ \unisonFile -> do
sr <- toSlurpResult currentPath' unisonFile <$> slurpResultNames0
names <- displayNames unisonFile
pped <- prettyPrintEnvDecl names
let ppe = PPE.suffixifiedPPE pped
eval . Notify $ Typechecked sourceName ppe sr unisonFile
unlessError' EvaluationFailure do
(bindings, e) <- ExceptT . eval . Evaluate ppe $ unisonFile
lift do
let e' = Map.map go e
go (ann, kind, _hash, _uneval, eval, isHit) = (ann, kind, eval, isHit)
unless (null e') $
eval . Notify $ Evaluated text ppe bindings e'
latestTypecheckedFile .= Just unisonFile
case e of
Left (IncomingRootBranch hashes) ->
eval . Notify $ WarnIncomingRootBranch
(SBH.fromHash sbhLength $ Branch.headHash root')
(Set.map (SBH.fromHash sbhLength) hashes)
Left (UnisonFileChanged sourceName text) ->
-- We skip this update if it was programmatically generated
if maybe False snd latestFile'
then modifying latestFile (fmap (const False) <$>)
else loadUnisonFile sourceName text
Right input ->
let
ifConfirmed = ifM (confirmedCommand input)
branchNotFound = respond . BranchNotFound
branchNotFound' = respond . BranchNotFound . Path.unsplit'
patchNotFound :: Path.Split' -> Action' m v ()
patchNotFound s = respond $ PatchNotFound s
patchExists :: Path.Split' -> Action' m v ()
patchExists s = respond $ PatchAlreadyExists s
typeNotFound = respond . TypeNotFound
typeNotFound' = respond . TypeNotFound'
termNotFound = respond . TermNotFound
termNotFound' = respond . TermNotFound'
nameConflicted src tms tys = respond (DeleteNameAmbiguous hqLength src tms tys)
typeConflicted src = nameConflicted src Set.empty
termConflicted src tms = nameConflicted src tms Set.empty
hashConflicted src = respond . HashAmbiguous src
typeReferences :: [SearchResult] -> [Reference]
typeReferences rs
= [ r | SR.Tp (SR.TypeResult _ r _) <- rs ]
termReferences :: [SearchResult] -> [Reference]
termReferences rs =
[ r | SR.Tm (SR.TermResult _ (Referent.Ref r) _) <- rs ]
termResults rs = [ r | SR.Tm r <- rs ]
typeResults rs = [ r | SR.Tp r <- rs ]
doRemoveReplacement from patchPath isTerm = do
let patchPath' = fromMaybe defaultPatchPath patchPath
patch <- getPatchAt patchPath'
QueryResult misses' hits <- hqNameQuery [from]
let tpRefs = Set.fromList $ typeReferences hits
tmRefs = Set.fromList $ termReferences hits
misses = Set.difference (Set.fromList misses') if isTerm
then Set.fromList $ HQ'.toHQ . SR.termName <$> termResults hits
else Set.fromList $ HQ'.toHQ . SR.typeName <$> typeResults hits
go :: Reference -> Action m (Either Event Input) v ()
go fr = do
let termPatch =
over Patch.termEdits (R.deleteDom fr) patch
typePatch =
over Patch.typeEdits (R.deleteDom fr) patch
(patchPath'', patchName) = resolveSplit' patchPath'
-- Save the modified patch
stepAtM inputDescription
(patchPath'',
Branch.modifyPatches
patchName
(const (if isTerm then termPatch else typePatch)))
-- Say something
success
unless (Set.null misses) $
respond $ SearchTermsNotFound (Set.toList misses)
traverse_ go (if isTerm then tmRefs else tpRefs)
branchExists dest _x = respond $ BranchAlreadyExists dest
branchExistsSplit = branchExists . Path.unsplit'
typeExists dest = respond . TypeAlreadyExists dest
termExists dest = respond . TermAlreadyExists dest
-- | try to get these as close as possible to the command that caused the change
inputDescription :: InputDescription
inputDescription = case input of
ForkLocalBranchI src dest -> "fork " <> hp' src <> " " <> p' dest
MergeLocalBranchI src dest mode -> case mode of
Branch.RegularMerge -> "merge " <> p' src <> " " <> p' dest
Branch.SquashMerge -> "merge.squash " <> p' src <> " " <> p' dest
ResetRootI src -> "reset-root " <> hp' src
AliasTermI src dest -> "alias.term " <> hhqs' src <> " " <> ps' dest
AliasTypeI src dest -> "alias.type " <> hhqs' src <> " " <> ps' dest
AliasManyI srcs dest ->
"alias.many " <> intercalateMap " " hqs srcs <> " " <> p' dest
MoveTermI src dest -> "move.term " <> hqs' src <> " " <> ps' dest
MoveTypeI src dest -> "move.type " <> hqs' src <> " " <> ps' dest
MoveBranchI src dest -> "move.namespace " <> ops' src <> " " <> ps' dest
MovePatchI src dest -> "move.patch " <> ps' src <> " " <> ps' dest
CopyPatchI src dest -> "copy.patch " <> ps' src <> " " <> ps' dest
DeleteI thing -> "delete " <> hqs' thing
DeleteTermI def -> "delete.term " <> hqs' def
DeleteTypeI def -> "delete.type " <> hqs' def
DeleteBranchI opath -> "delete.namespace " <> ops' opath
DeletePatchI path -> "delete.patch " <> ps' path
ReplaceI src target p ->
"replace " <> HQ.toText src <> " "
<> HQ.toText target <> " "
<> opatch p
ResolveTermNameI path -> "resolve.termName " <> hqs' path
ResolveTypeNameI path -> "resolve.typeName " <> hqs' path
AddI _selection -> "add"
UpdateI p _selection -> "update " <> opatch p
PropagatePatchI p scope -> "patch " <> ps' p <> " " <> p' scope
UndoI{} -> "undo"
UiI -> "ui"
ExecuteI s -> "execute " <> Text.pack s
IOTestI hq -> "io.test " <> HQ.toText hq
LinkI md defs ->
"link " <> HQ.toText md <> " " <> intercalateMap " " hqs' defs
UnlinkI md defs ->
"unlink " <> HQ.toText md <> " " <> intercalateMap " " hqs' defs
UpdateBuiltinsI -> "builtins.update"
MergeBuiltinsI -> "builtins.merge"
MergeIOBuiltinsI -> "builtins.mergeio"
PullRemoteBranchI orepo dest _syncMode ->
(Text.pack . InputPattern.patternName
$ InputPatterns.patternFromInput input)
<> " "
-- todo: show the actual config-loaded namespace
<> maybe "(remote namespace from .unisonConfig)"
(uncurry3 printNamespace) orepo
<> " "
<> p' dest
LoadI{} -> wat
PreviewAddI{} -> wat
PreviewUpdateI{} -> wat
CreateAuthorI (NameSegment id) name -> "create.author " <> id <> " " <> name
CreatePullRequestI{} -> wat
LoadPullRequestI base head dest ->
"pr.load "
<> uncurry3 printNamespace base
<> " "
<> uncurry3 printNamespace head
<> " "
<> p' dest
PushRemoteBranchI{} -> wat
PreviewMergeLocalBranchI{} -> wat
DiffNamespaceI{} -> wat
SwitchBranchI{} -> wat
PopBranchI{} -> wat
NamesI{} -> wat
TodoI{} -> wat
ListEditsI{} -> wat
ListDependenciesI{} -> wat
ListDependentsI{} -> wat
HistoryI{} -> wat
TestI{} -> wat
LinksI{} -> wat
SearchByNameI{} -> wat
FindShallowI{} -> wat
FindPatchI{} -> wat
ShowDefinitionI{} -> wat
DisplayI{} -> wat
DocsI{} -> wat
ShowDefinitionByPrefixI{} -> wat
ShowReflogI{} -> wat
DebugNumberedArgsI{} -> wat
DebugBranchHistoryI{} -> wat
DebugTypecheckedUnisonFileI{} -> wat
DebugDumpNamespacesI{} -> wat
DebugDumpNamespaceSimpleI{} -> wat
DebugClearWatchI {} -> wat
QuitI{} -> wat
DeprecateTermI{} -> undefined
DeprecateTypeI{} -> undefined
RemoveTermReplacementI src p ->
"delete.term-replacement" <> HQ.toText src <> " " <> opatch p
RemoveTypeReplacementI src p ->
"delete.type-replacement" <> HQ.toText src <> " " <> opatch p
where
hp' = either (Text.pack . show) p'
p' = Text.pack . show . resolveToAbsolute
ops' = maybe "." ps'
opatch = ps' . fromMaybe defaultPatchPath
wat = error $ show input ++ " is not expected to alter the branch"
hhqs' (Left sh) = SH.toText sh
hhqs' (Right x) = hqs' x
hqs' (p, hq) =
Monoid.unlessM (Path.isRoot' p) (p' p) <> "." <> Text.pack (show hq)
hqs (p, hq) = hqs' (Path' . Right . Path.Relative $ p, hq)
ps' = p' . Path.unsplit'
stepAt = Unison.Codebase.Editor.HandleInput.stepAt inputDescription
stepManyAt = Unison.Codebase.Editor.HandleInput.stepManyAt inputDescription
stepManyAtNoSync =
Unison.Codebase.Editor.HandleInput.stepManyAtNoSync
updateRoot = flip Unison.Codebase.Editor.HandleInput.updateRoot inputDescription
syncRoot = use root >>= updateRoot
updateAtM = Unison.Codebase.Editor.HandleInput.updateAtM inputDescription
unlessGitError = unlessError' (Output.GitError input)
importRemoteBranch ns mode = ExceptT . eval $ ImportRemoteBranch ns mode
viewRemoteBranch ns = ExceptT . eval $ ViewRemoteBranch ns
syncRemoteRootBranch repo b mode =
ExceptT . eval $ SyncRemoteRootBranch repo b mode
loadSearchResults = eval . LoadSearchResults
handleFailedDelete failed failedDependents = do
failed <- loadSearchResults $ SR.fromNames failed
failedDependents <- loadSearchResults $ SR.fromNames failedDependents
ppe <- fqnPPE =<< makePrintNamesFromLabeled'
(foldMap SR'.labeledDependencies $ failed <> failedDependents)
respond $ CantDelete ppe failed failedDependents
saveAndApplyPatch patchPath'' patchName patch' = do
stepAtM (inputDescription <> " (1/2)")
(patchPath'',
Branch.modifyPatches patchName (const patch'))
-- Apply the modified patch to the current path
-- since we might be able to propagate further.
void $ propagatePatch inputDescription patch' currentPath'
-- Say something
success
previewResponse sourceName sr uf = do
names <- displayNames uf
ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl names
respond $ Typechecked (Text.pack sourceName) ppe sr uf
addDefaultMetadata
:: SlurpComponent v
-> Action m (Either Event Input) v ()
addDefaultMetadata adds = do
let addedVs = Set.toList $ SC.types adds <> SC.terms adds
addedNs = traverse (Path.hqSplitFromName' . Name.fromVar) addedVs
case addedNs of
Nothing ->
error $ "I couldn't parse a name I just added to the codebase! "
<> "-- Added names: " <> show addedVs
Just addedNames -> do
dm <- resolveDefaultMetadata currentPath'
case toList dm of
[] -> pure ()
dm' -> do
let hqs = traverse InputPatterns.parseHashQualifiedName dm'
case hqs of
Left e -> respond $ ConfiguredMetadataParseError
(Path.absoluteToPath' currentPath')
(show dm')
e
Right defaultMeta ->
manageLinks True addedNames defaultMeta Metadata.insert
-- Add/remove links between definitions and metadata.
-- `silent` controls whether this produces any output to the user.
-- `srcs` is (names of the) definitions to pass to `op`
-- `mdValues` is (names of the) metadata to pass to `op`
-- `op` is the operation to add/remove/alter metadata mappings.
-- e.g. `Metadata.insert` is passed to add metadata links.
manageLinks :: Bool
-> [(Path', HQ'.HQSegment)]
-> [HQ.HashQualified Name]
-> (forall r. Ord r
=> (r, Metadata.Type, Metadata.Value)
-> Branch.Star r NameSegment
-> Branch.Star r NameSegment)
-> Action m (Either Event Input) v ()
manageLinks silent srcs mdValues op = do
mdValuels <- fmap (first toList) <$>
traverse (\x -> fmap (,x) (getHQTerms x)) mdValues
before <- Branch.head <$> use root
traverse_ go mdValuels
after <- Branch.head <$> use root
(ppe, outputDiff) <- diffHelper before after
if not silent then
if OBranchDiff.isEmpty outputDiff
then respond NoOp
else respondNumbered $ ShowDiffNamespace Path.absoluteEmpty
Path.absoluteEmpty
ppe
outputDiff
else unless (OBranchDiff.isEmpty outputDiff) $
respond DefaultMetadataNotification
where
go (mdl, hqn) = do
newRoot <- use root
let r0 = Branch.head newRoot
getTerms p = BranchUtil.getTerm (resolveSplit' p) r0
getTypes p = BranchUtil.getType (resolveSplit' p) r0
!srcle = toList . getTerms =<< srcs
!srclt = toList . getTypes =<< srcs
ppe = Backend.basicSuffixifiedNames
sbhLength
newRoot
(Path.unabsolute currentPath')
case mdl of
[r@(Referent.Ref mdValue)] -> do
mdType <- eval $ LoadTypeOfTerm mdValue
case mdType of
Nothing -> respond $ MetadataMissingType ppe r
Just ty -> do
let steps =
bimap (Path.unabsolute . resolveToAbsolute)
(const . step $ Type.toReference ty)
<$> srcs
stepManyAtNoSync steps
where
step mdType b0 =
let tmUpdates terms = foldl' go terms srcle
where go terms src = op (src, mdType, mdValue) terms
tyUpdates types = foldl' go types srclt
where go types src = op (src, mdType, mdValue) types
in over Branch.terms tmUpdates . over Branch.types tyUpdates $ b0
mdValues -> respond $ MetadataAmbiguous hqn ppe mdValues
delete
:: (Path.HQSplit' -> Set Referent) -- compute matching terms
-> (Path.HQSplit' -> Set Reference) -- compute matching types
-> Path.HQSplit'
-> Action' m v ()
delete getHQ'Terms getHQ'Types hq = do
let matchingTerms = toList (getHQ'Terms hq)
let matchingTypes = toList (getHQ'Types hq)
case (matchingTerms, matchingTypes) of
([], []) -> respond (NameNotFound hq)
(Set.fromList -> tms, Set.fromList -> tys) -> goMany tms tys
where
resolvedPath = resolveSplit' (HQ'.toName <$> hq)
goMany tms tys = do
let rootNames = Branch.toNames0 root0
name = Path.toName (Path.unsplit resolvedPath)
toRel :: Ord ref => Set ref -> R.Relation Name ref
toRel = R.fromList . fmap (name,) . toList
-- these names are relative to the root
toDelete = Names0 (toRel tms) (toRel tys)
(failed, failedDependents) <-
getEndangeredDependents (eval . GetDependents) toDelete rootNames
if failed == mempty then do
let makeDeleteTermNames = fmap (BranchUtil.makeDeleteTermName resolvedPath) . toList $ tms
let makeDeleteTypeNames = fmap (BranchUtil.makeDeleteTypeName resolvedPath) . toList $ tys
stepManyAt (makeDeleteTermNames ++ makeDeleteTypeNames)
root'' <- use root
diffHelper (Branch.head root') (Branch.head root'') >>=
respondNumbered . uncurry ShowDiffAfterDeleteDefinitions
else handleFailedDelete failed failedDependents
displayI outputLoc hq = do
uf <- use latestTypecheckedFile >>= addWatch (HQ.toString hq)
case uf of
Nothing -> do
let parseNames0 = (`Names3.Names` mempty) basicPrettyPrintNames0
results = Names3.lookupHQTerm hq parseNames0
if Set.null results then
respond $ SearchTermsNotFound [hq]
else if Set.size results > 1 then
respond $ TermAmbiguous hq results
-- ... but use the unsuffixed names for display
else do
let tm = Term.fromReferent External $ Set.findMin results
pped <- prettyPrintEnvDecl parseNames0
tm <- eval $ Evaluate1 (PPE.suffixifiedPPE pped) True tm
case tm of
Left e -> respond (EvaluationFailure e)
Right tm -> doDisplay outputLoc parseNames0 (Term.unannotate tm)
Just (toDisplay, unisonFile) -> do
ppe <- executePPE unisonFile
unlessError' EvaluationFailure do
evalResult <- ExceptT . eval . Evaluate ppe $ unisonFile
case Command.lookupEvalResult toDisplay evalResult of
Nothing -> error $ "Evaluation dropped a watch expression: " <> HQ.toString hq
Just tm -> lift do
ns <- displayNames unisonFile
doDisplay outputLoc ns tm
in case input of
ShowReflogI -> do
entries <- convertEntries Nothing [] <$> eval LoadReflog
numberedArgs .=
fmap (('#':) . SBH.toString . Output.hash) entries
respond $ ShowReflog entries
where
-- reverses & formats entries, adds synthetic entries when there is a
-- discontinuity in the reflog.
convertEntries :: Maybe Branch.Hash
-> [Output.ReflogEntry]
-> [Reflog.Entry]
-> [Output.ReflogEntry]
convertEntries _ acc [] = acc
convertEntries Nothing acc entries@(Reflog.Entry old _ _ : _) =
convertEntries
(Just old)
(Output.ReflogEntry (SBH.fromHash sbhLength old) "(initial reflogged namespace)" : acc)
entries
convertEntries (Just lastHash) acc entries@(Reflog.Entry old new reason : rest) =
if lastHash /= old then
convertEntries
(Just old)
(Output.ReflogEntry (SBH.fromHash sbhLength old) "(external change)" : acc)
entries
else
convertEntries
(Just new)
(Output.ReflogEntry (SBH.fromHash sbhLength new) reason : acc)
rest
ResetRootI src0 ->
case src0 of
Left hash -> unlessError do
newRoot <- resolveShortBranchHash hash
lift do
updateRoot newRoot
success
Right path' -> do
newRoot <- getAt $ resolveToAbsolute path'
if Branch.isEmpty newRoot then respond $ BranchNotFound path'
else do
updateRoot newRoot
success
ForkLocalBranchI src0 dest0 -> do
let tryUpdateDest srcb dest0 = do
let dest = resolveToAbsolute dest0
-- if dest isn't empty: leave dest unchanged, and complain.
destb <- getAt dest
if Branch.isEmpty destb then do
ok <- updateAtM dest (const $ pure srcb)
if ok then success else respond $ BranchEmpty src0
else respond $ BranchAlreadyExists dest0
case src0 of
Left hash -> unlessError do
srcb <- resolveShortBranchHash hash
lift $ tryUpdateDest srcb dest0
Right path' -> do
srcb <- getAt $ resolveToAbsolute path'
if Branch.isEmpty srcb then respond $ BranchNotFound path'
else tryUpdateDest srcb dest0
MergeLocalBranchI src0 dest0 mergeMode -> do
let [src, dest] = resolveToAbsolute <$> [src0, dest0]
srcb <- getAt src
if Branch.isEmpty srcb then branchNotFound src0
else do
let err = Just $ MergeAlreadyUpToDate src0 dest0
mergeBranchAndPropagateDefaultPatch mergeMode inputDescription err srcb (Just dest0) dest
PreviewMergeLocalBranchI src0 dest0 -> do
let [src, dest] = resolveToAbsolute <$> [src0, dest0]
srcb <- getAt src
if Branch.isEmpty srcb then branchNotFound src0
else do
destb <- getAt dest
merged <- eval $ Merge Branch.RegularMerge srcb destb
if merged == destb
then respond (PreviewMergeAlreadyUpToDate src0 dest0)
else
diffHelper (Branch.head destb) (Branch.head merged) >>=
respondNumbered . uncurry (ShowDiffAfterMergePreview dest0 dest)
DiffNamespaceI before0 after0 -> do
let [beforep, afterp] =
resolveToAbsolute <$> [before0, after0]
before <- Branch.head <$> getAt beforep
after <- Branch.head <$> getAt afterp
(ppe, outputDiff) <- diffHelper before after
respondNumbered $ ShowDiffNamespace beforep afterp ppe outputDiff
CreatePullRequestI baseRepo headRepo -> unlessGitError do
(cleanupBase, baseBranch) <- viewRemoteBranch baseRepo
(cleanupHead, headBranch) <- viewRemoteBranch headRepo
lift do
merged <- eval $ Merge Branch.RegularMerge baseBranch headBranch
(ppe, diff) <- diffHelper (Branch.head baseBranch) (Branch.head merged)
respondNumbered $ ShowDiffAfterCreatePR baseRepo headRepo ppe diff
eval . Eval $ do
cleanupBase
cleanupHead
LoadPullRequestI baseRepo headRepo dest0 -> do
let desta = resolveToAbsolute dest0
let dest = Path.unabsolute desta
destb <- getAt desta
if Branch.isEmpty0 (Branch.head destb) then unlessGitError do
baseb <- importRemoteBranch baseRepo SyncMode.ShortCircuit
headb <- importRemoteBranch headRepo SyncMode.ShortCircuit
lift $ do
mergedb <- eval $ Merge Branch.RegularMerge baseb headb
squashedb <- eval $ Merge Branch.SquashMerge headb baseb
stepManyAt
[BranchUtil.makeSetBranch (dest, "base") baseb
,BranchUtil.makeSetBranch (dest, "head") headb
,BranchUtil.makeSetBranch (dest, "merged") mergedb
,BranchUtil.makeSetBranch (dest, "squashed") squashedb]
let base = snoc dest0 "base"
head = snoc dest0 "head"
merged = snoc dest0 "merged"
squashed = snoc dest0 "squashed"
respond $ LoadPullRequest baseRepo headRepo base head merged squashed
loadPropagateDiffDefaultPatch
inputDescription
(Just merged)
(snoc desta "merged")
else
respond . BranchNotEmpty . Path.Path' . Left $ currentPath'
-- move the root to a sub-branch
MoveBranchI Nothing dest -> do
b <- use root
stepManyAt [ (Path.empty, const Branch.empty0)
, BranchUtil.makeSetBranch (resolveSplit' dest) b ]
success
MoveBranchI (Just src) dest ->
maybe (branchNotFound' src) srcOk (getAtSplit' src)
where
srcOk b = maybe (destOk b) (branchExistsSplit dest) (getAtSplit' dest)
destOk b = do
stepManyAt
[ BranchUtil.makeSetBranch (resolveSplit' src) Branch.empty
, BranchUtil.makeSetBranch (resolveSplit' dest) b ]
success -- could give rando stats about new defns
MovePatchI src dest -> do
psrc <- getPatchAtSplit' src
pdest <- getPatchAtSplit' dest
case (psrc, pdest) of
(Nothing, _) -> patchNotFound src
(_, Just _) -> patchExists dest
(Just p, Nothing) -> do
stepManyAt [
BranchUtil.makeDeletePatch (resolveSplit' src),
BranchUtil.makeReplacePatch (resolveSplit' dest) p ]
success
CopyPatchI src dest -> do
psrc <- getPatchAtSplit' src
pdest <- getPatchAtSplit' dest
case (psrc, pdest) of
(Nothing, _) -> patchNotFound src
(_, Just _) -> patchExists dest
(Just p, Nothing) -> do
stepAt (BranchUtil.makeReplacePatch (resolveSplit' dest) p)
success
DeletePatchI src -> do
psrc <- getPatchAtSplit' src
case psrc of
Nothing -> patchNotFound src
Just _ -> do
stepAt (BranchUtil.makeDeletePatch (resolveSplit' src))
success
DeleteBranchI Nothing ->
ifConfirmed
(do
stepAt (Path.empty, const Branch.empty0)
respond DeletedEverything)
(respond DeleteEverythingConfirmation)
DeleteBranchI (Just p) ->
maybe (branchNotFound' p) go $ getAtSplit' p
where
go (Branch.head -> b) = do
(failed, failedDependents) <-
let rootNames = Branch.toNames0 root0
toDelete = Names.prefix0
(Path.toName . Path.unsplit . resolveSplit' $ p) -- resolveSplit' incorporates currentPath
(Branch.toNames0 b)
in getEndangeredDependents (eval . GetDependents) toDelete rootNames
if failed == mempty then do
stepAt $ BranchUtil.makeSetBranch (resolveSplit' p) Branch.empty
-- Looks similar to the 'toDelete' above... investigate me! ;)
diffHelper b Branch.empty0 >>=
respondNumbered
. uncurry (ShowDiffAfterDeleteBranch
$ resolveToAbsolute (Path.unsplit' p))
else handleFailedDelete failed failedDependents
SwitchBranchI path' -> do
let path = resolveToAbsolute path'
currentPathStack %= Nel.cons path
branch' <- getAt path
when (Branch.isEmpty branch') (respond $ CreatedNewBranch path)
PopBranchI -> use (currentPathStack . to Nel.uncons) >>= \case
(_, Nothing) -> respond StartOfCurrentPathHistory
(_, Just t) -> currentPathStack .= t
HistoryI resultsCap diffCap from -> case from of
Left hash -> unlessError do
b <- resolveShortBranchHash hash
lift $ doHistory 0 b []
Right path' -> do
let path = resolveToAbsolute path'
branch' <- getAt path
if Branch.isEmpty branch' then respond $ CreatedNewBranch path
else doHistory 0 branch' []
where
doHistory !n b acc =
if maybe False (n >=) resultsCap then
respond $ History diffCap acc (PageEnd (sbh $ Branch.headHash b) n)
else case Branch._history b of
Causal.One{} ->
respond $ History diffCap acc (EndOfLog . sbh $ Branch.headHash b)
Causal.Merge{..} ->
respond $ History diffCap acc (MergeTail (sbh $ Branch.headHash b) . map sbh $ Map.keys tails)
Causal.Cons{..} -> do
b' <- fmap Branch.Branch . eval . Eval $ snd tail
let elem = (sbh $ Branch.headHash b, Branch.namesDiff b' b)
doHistory (n+1) b' (elem : acc)
UndoI -> do
prev <- eval . Eval $ Branch.uncons root'
case prev of
Nothing ->
respond . CantUndo $ if Branch.isOne root' then CantUndoPastStart
else CantUndoPastMerge
Just (_, prev) -> do
updateRoot prev
diffHelper (Branch.head prev) (Branch.head root') >>=
respondNumbered . uncurry Output.ShowDiffAfterUndo
UiI -> eval UI
AliasTermI src dest -> do
referents <- resolveHHQS'Referents src
case (toList referents, toList (getTerms dest)) of
([r], []) -> do
stepAt (BranchUtil.makeAddTermName (resolveSplit' dest) r (oldMD r))
success
([_], rs@(_:_)) -> termExists dest (Set.fromList rs)
([], _) -> either termNotFound' termNotFound src
(rs, _) ->
either hashConflicted termConflicted src (Set.fromList rs)
where
oldMD r = either (const mempty)
(\src ->
let p = resolveSplit' src in
BranchUtil.getTermMetadataAt p r root0)
src
AliasTypeI src dest -> do
refs <- resolveHHQS'Types src
case (toList refs, toList (getTypes dest)) of
([r], []) -> do
stepAt (BranchUtil.makeAddTypeName (resolveSplit' dest) r (oldMD r))
success
([_], rs@(_:_)) -> typeExists dest (Set.fromList rs)
([], _) -> either typeNotFound' typeNotFound src
(rs, _) ->
either
(\src -> hashConflicted src . Set.map Referent.Ref)
typeConflicted
src
(Set.fromList rs)
where
oldMD r =
either (const mempty)
(\src ->
let p = resolveSplit' src in
BranchUtil.getTypeMetadataAt p r root0)
src
-- this implementation will happily produce name conflicts,
-- but will surface them in a normal diff at the end of the operation.
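    -- e.g. (illustrative): aliasing srcs `foo` and `bar` into dest `.quux`
    -- adds `.quux.foo` and `.quux.bar` (each source keeps its relative path
    -- under dest), then shows the resulting diff of `.quux`.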
AliasManyI srcs dest' -> do
let destAbs = resolveToAbsolute dest'
old <- getAt destAbs
let (unknown, actions) = foldl' go mempty srcs
stepManyAt actions
new <- getAt destAbs
diffHelper (Branch.head old) (Branch.head new) >>=
respondNumbered . uncurry (ShowDiffAfterModifyBranch dest' destAbs)
unless (null unknown) $
respond . SearchTermsNotFound . fmap fixupOutput $ unknown
where
-- a list of missing sources (if any) and the actions that do the work
go :: ([Path.HQSplit], [(Path, Branch0 m -> Branch0 m)])
-> Path.HQSplit
-> ([Path.HQSplit], [(Path, Branch0 m -> Branch0 m)])
go (missingSrcs, actions) hqsrc =
let
src :: Path.Split
src = second HQ'.toName hqsrc
proposedDest :: Path.Split
proposedDest = second HQ'.toName hqProposedDest
hqProposedDest :: Path.HQSplit
hqProposedDest = first Path.unabsolute $
Path.resolve (resolveToAbsolute dest') hqsrc
-- `Nothing` if src doesn't exist
doType :: Maybe [(Path, Branch0 m -> Branch0 m)]
doType = case ( BranchUtil.getType hqsrc currentBranch0
, BranchUtil.getType hqProposedDest root0
) of
(null -> True, _) -> Nothing -- missing src
(rsrcs, existing) -> -- happy path
Just . map addAlias . toList $ Set.difference rsrcs existing
where
addAlias r = BranchUtil.makeAddTypeName proposedDest r (oldMD r)
oldMD r = BranchUtil.getTypeMetadataAt src r currentBranch0
doTerm :: Maybe [(Path, Branch0 m -> Branch0 m)]
doTerm = case ( BranchUtil.getTerm hqsrc currentBranch0
, BranchUtil.getTerm hqProposedDest root0
) of
(null -> True, _) -> Nothing -- missing src
(rsrcs, existing) ->
Just . map addAlias . toList $ Set.difference rsrcs existing
where
addAlias r = BranchUtil.makeAddTermName proposedDest r (oldMD r)
oldMD r = BranchUtil.getTermMetadataAt src r currentBranch0
in case (doType, doTerm) of
(Nothing, Nothing) -> (missingSrcs :> hqsrc, actions)
(Just as, Nothing) -> (missingSrcs, actions ++ as)
(Nothing, Just as) -> (missingSrcs, actions ++ as)
(Just as1, Just as2) -> (missingSrcs, actions ++ as1 ++ as2)
fixupOutput :: Path.HQSplit -> HQ.HashQualified Name
fixupOutput = fmap Path.toName . HQ'.toHQ . Path.unsplitHQ
NamesI thing -> do
ns0 <- basicParseNames0
let ns = Names ns0 mempty
terms = Names3.lookupHQTerm thing ns
types = Names3.lookupHQType thing ns
printNames = Names basicPrettyPrintNames0 mempty
terms' :: Set (Referent, Set (HQ'.HashQualified Name))
terms' = Set.map go terms where
go r = (r, Names3.termName hqLength r printNames)
types' :: Set (Reference, Set (HQ'.HashQualified Name))
types' = Set.map go types where
go r = (r, Names3.typeName hqLength r printNames)
respond $ ListNames hqLength (toList types') (toList terms')
LinkI mdValue srcs -> do
manageLinks False srcs [mdValue] Metadata.insert
syncRoot
UnlinkI mdValue srcs -> do
manageLinks False srcs [mdValue] Metadata.delete
syncRoot
-- > links List.map (.Docs .English)
    -- > links List.map -- give me all the links on `List.map`
-- > links Optional License
LinksI src mdTypeStr -> unlessError do
(ppe, out) <- getLinks input src (Right mdTypeStr)
lift do
numberedArgs .= fmap (HQ.toString . view _1) out
respond $ ListOfLinks ppe out
DocsI src -> fileByName where
{- Given `docs foo`, we look for docs in 3 places, in this order:
(fileByName) First check the file for `foo.doc`, and if found do `display foo.doc`
(codebaseByMetadata) Next check for doc metadata linked to `foo` in the codebase
(codebaseByName) Lastly check for `foo.doc` in the codebase and if found do `display foo.doc`
-}
hq :: HQ.HashQualified Name
hq = let
hq' :: HQ'.HashQualified Name
hq' = Name.convert @Path.Path' @Name <$> Name.convert src
in Name.convert hq'
dotDoc :: HQ.HashQualified Name
dotDoc = hq <&> \n -> Name.joinDot n "doc"
fileByName = do
ns <- maybe mempty UF.typecheckedToNames0 <$> use latestTypecheckedFile
fnames <- pure $ Names3.Names ns mempty
case Names3.lookupHQTerm dotDoc fnames of
s | Set.size s == 1 -> do
-- the displayI command expects full term names, so we resolve
-- the hash back to its full name in the file
fname' <- pure $ Names3.longestTermName 10 (Set.findMin s) fnames
displayI ConsoleLocation fname'
_ -> codebaseByMetadata
codebaseByMetadata = unlessError do
(ppe, out) <- getLinks input src (Left $ Set.fromList [DD.docRef, DD.doc2Ref])
lift case out of
[] -> codebaseByName
[(_name, ref, _tm)] -> do
len <- eval BranchHashLength
let names = Names3.Names basicPrettyPrintNames0 mempty
let tm = Term.ref External ref
tm <- eval $ Evaluate1 (PPE.fromNames len names) True tm
case tm of
Left e -> respond (EvaluationFailure e)
Right tm -> doDisplay ConsoleLocation names (Term.unannotate tm)
out -> do
numberedArgs .= fmap (HQ.toString . view _1) out
respond $ ListOfLinks ppe out
codebaseByName = do
parseNames <- basicParseNames0
case Names3.lookupHQTerm dotDoc (Names3.Names parseNames mempty) of
s | Set.size s == 1 -> displayI ConsoleLocation dotDoc
| Set.size s == 0 -> respond $ ListOfLinks mempty []
| otherwise -> -- todo: return a list of links here too
respond $ ListOfLinks mempty []
CreateAuthorI authorNameSegment authorFullName -> do
initialBranch <- getAt currentPath'
AuthorInfo
guid@(guidRef, _, _)
author@(authorRef, _, _)
copyrightHolder@(copyrightHolderRef, _, _) <-
eval $ CreateAuthorInfo authorFullName
-- add the new definitions to the codebase and to the namespace
traverse_ (eval . uncurry3 PutTerm) [guid, author, copyrightHolder]
stepManyAt
[ BranchUtil.makeAddTermName (resolveSplit' authorPath) (d authorRef) mempty
, BranchUtil.makeAddTermName (resolveSplit' copyrightHolderPath) (d copyrightHolderRef) mempty
, BranchUtil.makeAddTermName (resolveSplit' guidPath) (d guidRef) mempty
]
finalBranch <- getAt currentPath'
-- print some output
diffHelper (Branch.head initialBranch) (Branch.head finalBranch) >>=
respondNumbered
. uncurry (ShowDiffAfterCreateAuthor
authorNameSegment
(Path.unsplit' base)
currentPath')
where
d :: Reference.Id -> Referent
d = Referent.Ref . Reference.DerivedId
base :: Path.Split' = (Path.relativeEmpty', "metadata")
authorPath = base |> "authors" |> authorNameSegment
copyrightHolderPath = base |> "copyrightHolders" |> authorNameSegment
guidPath = authorPath |> "guid"
MoveTermI src dest ->
case (toList (getHQ'Terms src), toList (getTerms dest)) of
([r], []) -> do
stepManyAt
[ BranchUtil.makeDeleteTermName p r
, BranchUtil.makeAddTermName (resolveSplit' dest) r (mdSrc r)]
success
([_], rs) -> termExists dest (Set.fromList rs)
([], _) -> termNotFound src
(rs, _) -> termConflicted src (Set.fromList rs)
where p = resolveSplit' (HQ'.toName <$> src)
mdSrc r = BranchUtil.getTermMetadataAt p r root0
MoveTypeI src dest ->
case (toList (getHQ'Types src), toList (getTypes dest)) of
([r], []) -> do
stepManyAt
[ BranchUtil.makeDeleteTypeName p r
, BranchUtil.makeAddTypeName (resolveSplit' dest) r (mdSrc r) ]
success
([_], rs) -> typeExists dest (Set.fromList rs)
([], _) -> typeNotFound src
(rs, _) -> typeConflicted src (Set.fromList rs)
where
p = resolveSplit' (HQ'.toName <$> src)
mdSrc r = BranchUtil.getTypeMetadataAt p r root0
DeleteI hq -> delete getHQ'Terms getHQ'Types hq
DeleteTypeI hq -> delete (const Set.empty) getHQ'Types hq
DeleteTermI hq -> delete getHQ'Terms (const Set.empty) hq
DisplayI outputLoc hq -> displayI outputLoc hq
ShowDefinitionI outputLoc query -> do
res <- eval $ GetDefinitionsBySuffixes (Just currentPath'') root' query
case res of
Left e -> handleBackendError e
Right (Backend.DefinitionResults terms types misses) -> do
let loc = case outputLoc of
ConsoleLocation -> Nothing
FileLocation path -> Just path
LatestFileLocation ->
fmap fst latestFile' <|> Just "scratch.u"
printNames =
Backend.getCurrentPrettyNames currentPath'' root'
ppe = PPE.fromNamesDecl hqLength printNames
unless (null types && null terms) $
eval . Notify $
DisplayDefinitions loc ppe types terms
unless (null misses) $
eval . Notify $ SearchTermsNotFound misses
-- We set latestFile to be programmatically generated, if we
          -- are writing these definitions to a file - this will skip the
-- next update for that file (which will happen immediately)
latestFile .= ((, True) <$> loc)
FindPatchI -> do
let patches =
[ Path.toName $ Path.snoc p seg
| (p, b) <- Branch.toList0 currentBranch0
, (seg, _) <- Map.toList (Branch._edits b) ]
respond $ ListOfPatches $ Set.fromList patches
numberedArgs .= fmap Name.toString patches
FindShallowI pathArg -> do
let pathArgAbs = resolveToAbsolute pathArg
ppe = Backend.basicSuffixifiedNames
sbhLength
root'
(Path.fromPath' pathArg)
res <- eval $ FindShallow pathArgAbs
case res of
Left e -> handleBackendError e
Right entries -> do
-- caching the result as an absolute path, for easier jumping around
numberedArgs .= fmap entryToHQString entries
respond $ ListShallow ppe entries
where
entryToHQString :: ShallowListEntry v Ann -> String
entryToHQString e =
fixup $ case e of
ShallowTypeEntry (TypeEntry _ hq _) -> HQ'.toString hq
ShallowTermEntry (TermEntry _ hq _ _) -> HQ'.toString hq
ShallowBranchEntry ns _ _ -> NameSegment.toString ns
ShallowPatchEntry ns -> NameSegment.toString ns
where
fixup s = case pathArgStr of
"" -> s
p | last p == '.' -> p ++ s
p -> p ++ "." ++ s
pathArgStr = show pathArg
SearchByNameI isVerbose _showAll ws -> do
let prettyPrintNames0 = basicPrettyPrintNames0
unlessError do
results <- case ws of
-- no query, list everything
[] -> pure . listBranch $ Branch.head currentBranch'
-- type query
":" : ws ->
ExceptT (parseSearchType input (unwords ws)) >>= \typ ->
ExceptT $ do
let named = Branch.deepReferents root0
matches <- fmap toList . eval $ GetTermsOfType typ
matches <- filter (`Set.member` named) <$>
if null matches then do
respond NoExactTypeMatches
fmap toList . eval $ GetTermsMentioningType typ
else pure matches
let results =
-- in verbose mode, aliases are shown, so we collapse all
-- aliases to a single search result; in non-verbose mode,
-- a separate result may be shown for each alias
(if isVerbose then uniqueBy SR.toReferent else id) $
searchResultsFor prettyPrintNames0 matches []
pure . pure $ results
-- name query
(map HQ.unsafeFromString -> qs) -> do
let ns = basicPrettyPrintNames0
let srs = searchBranchScored ns fuzzyNameDistance qs
pure $ uniqueBy SR.toReferent srs
lift do
numberedArgs .= fmap searchResultToHQString results
results' <- loadSearchResults results
ppe <- suffixifiedPPE =<<
makePrintNamesFromLabeled'
(foldMap SR'.labeledDependencies results')
respond $ ListOfDefinitions ppe isVerbose results'
ResolveTypeNameI hq ->
zeroOneOrMore (getHQ'Types hq) (typeNotFound hq) go (typeConflicted hq)
where
conflicted = getHQ'Types (fmap HQ'.toNameOnly hq)
makeDelete =
BranchUtil.makeDeleteTypeName (resolveSplit' (HQ'.toName <$> hq))
go r = stepManyAt . fmap makeDelete . toList . Set.delete r $ conflicted
ResolveTermNameI hq -> do
refs <- getHQ'TermsIncludingHistorical hq
zeroOneOrMore refs (termNotFound hq) go (termConflicted hq)
where
conflicted = getHQ'Terms (fmap HQ'.toNameOnly hq)
makeDelete =
BranchUtil.makeDeleteTermName (resolveSplit' (HQ'.toName <$> hq))
go r = stepManyAt . fmap makeDelete . toList . Set.delete r $ conflicted
ReplaceI from to patchPath -> do
let patchPath' = fromMaybe defaultPatchPath patchPath
patch <- getPatchAt patchPath'
QueryResult fromMisses' fromHits <- hqNameQuery [from]
QueryResult toMisses' toHits <- hqNameQuery [to]
let termsFromRefs = termReferences fromHits
termsToRefs = termReferences toHits
typesFromRefs = typeReferences fromHits
typesToRefs = typeReferences toHits
--- Here are all the kinds of misses
--- [X] [X]
--- [Type] [Term]
--- [Term] [Type]
--- [Type] [X]
--- [Term] [X]
--- [X] [Type]
--- [X] [Term]
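          -- e.g. (illustrative): replacing `foo` with `Bar` where `foo` only
          -- resolves to a term and `Bar` only to a type falls into the
          -- ([_], [], [], [_]) case below and is reported as a
          -- TypeTermMismatch rather than performing any edit.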
-- Type hits are term misses
termFromMisses = fromMisses'
<> (HQ'.toHQ . SR.typeName <$> typeResults fromHits)
termToMisses = toMisses'
<> (HQ'.toHQ . SR.typeName <$> typeResults toHits)
-- Term hits are type misses
typeFromMisses = fromMisses'
<> (HQ'.toHQ . SR.termName <$> termResults fromHits)
typeToMisses = toMisses'
<> (HQ'.toHQ . SR.termName <$> termResults toHits)
termMisses = termFromMisses <> termToMisses
typeMisses = typeFromMisses <> typeToMisses
replaceTerms :: Reference
-> Reference
-> Action m (Either Event Input) v ()
replaceTerms fr tr = do
mft <- eval $ LoadTypeOfTerm fr
mtt <- eval $ LoadTypeOfTerm tr
let termNotFound = respond . TermNotFound'
. SH.take hqLength
. Reference.toShortHash
case (mft, mtt) of
(Nothing, _) -> termNotFound fr
(_, Nothing) -> termNotFound tr
(Just ft, Just tt) -> do
let
patch' =
-- The modified patch
over Patch.termEdits
(R.insert fr (Replace tr (TermEdit.typing tt ft))
. R.deleteDom fr)
patch
(patchPath'', patchName) = resolveSplit' patchPath'
saveAndApplyPatch patchPath'' patchName patch'
replaceTypes :: Reference
-> Reference
-> Action m (Either Event Input) v ()
replaceTypes fr tr = do
let patch' =
-- The modified patch
over Patch.typeEdits
(R.insert fr (TypeEdit.Replace tr) . R.deleteDom fr) patch
(patchPath'', patchName) = resolveSplit' patchPath'
saveAndApplyPatch patchPath'' patchName patch'
ambiguous t rs =
let rs' = Set.map Referent.Ref $ Set.fromList rs
in case t of
HQ.HashOnly h ->
hashConflicted h rs'
(Path.parseHQSplit' . HQ.toString -> Right n) ->
termConflicted n rs'
_ -> respond . BadName $ HQ.toString t
mismatch typeName termName = respond $ TypeTermMismatch typeName termName
case (termsFromRefs, termsToRefs, typesFromRefs, typesToRefs) of
([], [], [], []) -> respond $ SearchTermsNotFound termMisses
([_], [], [], [_]) -> mismatch to from
([], [_], [_], []) -> mismatch from to
([_], [], _, _) -> respond $ SearchTermsNotFound termMisses
([], [_], _, _) -> respond $ SearchTermsNotFound termMisses
(_, _, [_], []) -> respond $ SearchTermsNotFound typeMisses
(_, _, [], [_]) -> respond $ SearchTermsNotFound typeMisses
([fr], [tr], [], []) -> replaceTerms fr tr
([], [], [fr], [tr]) -> replaceTypes fr tr
(froms, [_], [], []) -> ambiguous from froms
([], [], froms, [_]) -> ambiguous from froms
([_], tos, [], []) -> ambiguous to tos
([], [], [_], tos) -> ambiguous to tos
(_, _, _, _) -> error "unpossible"
LoadI maybePath ->
case maybePath <|> (fst <$> latestFile') of
Nothing -> respond NoUnisonFile
Just path -> do
res <- eval . LoadSource . Text.pack $ path
case res of
InvalidSourceNameError -> respond $ InvalidSourceName path
LoadError -> respond $ SourceLoadFailed path
LoadSuccess contents -> loadUnisonFile (Text.pack path) contents
AddI hqs -> case uf of
Nothing -> respond NoUnisonFile
Just uf -> do
sr <- Slurp.disallowUpdates
. applySelection hqs uf
. toSlurpResult currentPath' uf
<$> slurpResultNames0
let adds = Slurp.adds sr
when (Slurp.isNonempty sr) $ do
stepAtNoSync ( Path.unabsolute currentPath'
, doSlurpAdds adds uf)
eval . AddDefsToCodebase . filterBySlurpResult sr $ uf
ppe <- prettyPrintEnvDecl =<< displayNames uf
respond $ SlurpOutput input (PPE.suffixifiedPPE ppe) sr
addDefaultMetadata adds
syncRoot
PreviewAddI hqs -> case (latestFile', uf) of
(Just (sourceName, _), Just uf) -> do
sr <- Slurp.disallowUpdates
. applySelection hqs uf
. toSlurpResult currentPath' uf
<$> slurpResultNames0
previewResponse sourceName sr uf
_ -> respond NoUnisonFile
UpdateI maybePatchPath hqs -> case uf of
Nothing -> respond NoUnisonFile
Just uf -> do
let patchPath = fromMaybe defaultPatchPath maybePatchPath
slurpCheckNames0 <- slurpResultNames0
currentPathNames0 <- currentPathNames0
let sr = applySelection hqs uf
. toSlurpResult currentPath' uf
$ slurpCheckNames0
addsAndUpdates = Slurp.updates sr <> Slurp.adds sr
fileNames0 = UF.typecheckedToNames0 uf
-- todo: display some error if typeEdits or termEdits itself contains a loop
typeEdits :: Map Name (Reference, Reference)
typeEdits = Map.fromList $ map f (toList $ SC.types (updates sr)) where
f v = case (toList (Names.typesNamed slurpCheckNames0 n)
,toList (Names.typesNamed fileNames0 n)) of
([old],[new]) -> (n, (old, new))
_ -> error $ "Expected unique matches for "
++ Var.nameStr v ++ " but got: "
                            ++ show (Names.typesNamed slurpCheckNames0 n, Names.typesNamed fileNames0 n)
where n = Name.fromVar v
hashTerms :: Map Reference (Type v Ann)
hashTerms = Map.fromList (toList hashTerms0) where
hashTerms0 = (\(r, _, typ) -> (r, typ)) <$> UF.hashTerms uf
termEdits :: Map Name (Reference, Reference)
termEdits = Map.fromList $ map g (toList $ SC.terms (updates sr)) where
g v = case ( toList (Names.refTermsNamed slurpCheckNames0 n)
, toList (Names.refTermsNamed fileNames0 n)) of
([old], [new]) -> (n, (old, new))
_ -> error $ "Expected unique matches for "
++ Var.nameStr v ++ " but got: "
                          ++ show (Names.refTermsNamed slurpCheckNames0 n, Names.refTermsNamed fileNames0 n)
where n = Name.fromVar v
termDeprecations :: [(Name, Referent)]
termDeprecations =
[ (n, r) | (oldTypeRef,_) <- Map.elems typeEdits
, (n, r) <- Names3.constructorsForType0 oldTypeRef currentPathNames0 ]
ye'ol'Patch <- getPatchAt patchPath
-- If `uf` updates a -> a', we want to replace all (a0 -> a) in patch
-- with (a0 -> a') in patch'.
-- So for all (a0 -> a) in patch, for all (a -> a') in `uf`,
-- we must know the type of a0, a, a'.
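        -- Illustrative sketch (hypothetical references): if the patch already
        -- contains #a0 -> #a and this file updates #a -> #a', the rewritten
        -- patch ends up with #a0 -> #a' and #a -> #a', and the Typing of each
        -- edit (Same / Subtype / Different) is recomputed from the types of
        -- #a0, #a, and #a'.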
let
-- we need:
-- all of the `old` references from the `new` edits,
-- plus all of the `old` references for edits from patch we're replacing
collectOldForTyping :: [(Reference, Reference)] -> Patch -> Set Reference
collectOldForTyping new old = foldl' f mempty (new ++ fromOld) where
f acc (r, _r') = Set.insert r acc
newLHS = Set.fromList . fmap fst $ new
fromOld :: [(Reference, Reference)]
fromOld = [ (r,r') | (r, TermEdit.Replace r' _) <- R.toList . Patch._termEdits $ old
, Set.member r' newLHS ]
neededTypes = collectOldForTyping (toList termEdits) ye'ol'Patch
allTypes :: Map Reference (Type v Ann) <-
fmap Map.fromList . for (toList neededTypes) $ \r ->
(r,) . fromMaybe (Type.builtin External "unknown type")
<$> (eval . LoadTypeOfTerm) r
let typing r1 r2 = case (Map.lookup r1 allTypes, Map.lookup r2 hashTerms) of
(Just t1, Just t2)
| Typechecker.isEqual t1 t2 -> TermEdit.Same
| Typechecker.isSubtype t1 t2 -> TermEdit.Subtype
| otherwise -> TermEdit.Different
e -> error $ "compiler bug: typing map not constructed properly\n" <>
"typing " <> show r1 <> " " <> show r2 <> " : " <> show e
let updatePatch :: Patch -> Patch
updatePatch p = foldl' step2 p' termEdits
where
p' = foldl' step1 p typeEdits
step1 p (r,r') = Patch.updateType r (TypeEdit.Replace r') p
step2 p (r,r') = Patch.updateTerm typing r (TermEdit.Replace r' (typing r r')) p
(p, seg) = Path.toAbsoluteSplit currentPath' patchPath
updatePatches :: Branch0 m -> m (Branch0 m)
updatePatches = Branch.modifyPatches seg updatePatch
when (Slurp.isNonempty sr) $ do
-- take a look at the `updates` from the SlurpResult
-- and make a patch diff to record a replacement from the old to new references
stepManyAtMNoSync
[( Path.unabsolute currentPath'
, pure . doSlurpUpdates typeEdits termEdits termDeprecations)
,( Path.unabsolute currentPath'
, pure . doSlurpAdds addsAndUpdates uf)
,( Path.unabsolute p, updatePatches )]
eval . AddDefsToCodebase . filterBySlurpResult sr $ uf
ppe <- prettyPrintEnvDecl =<< displayNames uf
respond $ SlurpOutput input (PPE.suffixifiedPPE ppe) sr
-- propagatePatch prints TodoOutput
void $ propagatePatchNoSync (updatePatch ye'ol'Patch) currentPath'
addDefaultMetadata addsAndUpdates
syncRoot
PreviewUpdateI hqs -> case (latestFile', uf) of
(Just (sourceName, _), Just uf) -> do
sr <- applySelection hqs uf
. toSlurpResult currentPath' uf
<$> slurpResultNames0
previewResponse sourceName sr uf
_ -> respond NoUnisonFile
TodoI patchPath branchPath' -> do
patch <- getPatchAt (fromMaybe defaultPatchPath patchPath)
doShowTodoOutput patch $ resolveToAbsolute branchPath'
TestI showOk showFail -> do
let
testTerms = Map.keys . R4.d1 . uncurry R4.selectD34 isTest
. Branch.deepTermMetadata $ currentBranch0
testRefs = Set.fromList [ r | Referent.Ref r <- toList testTerms ]
oks results =
[ (r, msg)
| (r, Term.List' ts) <- Map.toList results
, Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts
, cid == DD.okConstructorId && ref == DD.testResultRef ]
fails results =
[ (r, msg)
| (r, Term.List' ts) <- Map.toList results
, Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts
, cid == DD.failConstructorId && ref == DD.testResultRef ]
cachedTests <- fmap Map.fromList . eval $ LoadWatches UF.TestWatch testRefs
let stats = Output.CachedTests (Set.size testRefs) (Map.size cachedTests)
names <- makePrintNamesFromLabeled' $
LD.referents testTerms <>
LD.referents [ DD.okConstructorReferent, DD.failConstructorReferent ]
ppe <- fqnPPE names
respond $ TestResults stats ppe showOk showFail
(oks cachedTests) (fails cachedTests)
let toCompute = Set.difference testRefs (Map.keysSet cachedTests)
unless (Set.null toCompute) $ do
let total = Set.size toCompute
computedTests <- fmap join . for (toList toCompute `zip` [1..]) $ \(r,n) ->
case r of
Reference.DerivedId rid -> do
tm <- eval $ LoadTerm rid
case tm of
Nothing -> [] <$ respond (TermNotFound' . SH.take hqLength . Reference.toShortHash $ Reference.DerivedId rid)
Just tm -> do
respond $ TestIncrementalOutputStart ppe (n,total) r tm
-- v don't cache; test cache populated below
tm' <- eval $ Evaluate1 ppe False tm
case tm' of
Left e -> respond (EvaluationFailure e) $> []
Right tm' -> do
-- After evaluation, cache the result of the test
eval $ PutWatch UF.TestWatch rid tm'
respond $ TestIncrementalOutputEnd ppe (n,total) r tm'
pure [(r, tm')]
r -> error $ "unpossible, tests can't be builtins: " <> show r
let m = Map.fromList computedTests
respond $ TestResults Output.NewlyComputed ppe showOk showFail (oks m) (fails m)
-- ListBranchesI ->
-- eval ListBranches >>= respond . ListOfBranches currentBranchName'
-- DeleteBranchI branchNames -> withBranches branchNames $ \bnbs -> do
-- uniqueToDelete <- prettyUniqueDefinitions bnbs
-- let deleteBranches b =
-- traverse (eval . DeleteBranch) b >> respond (Success input)
-- if (currentBranchName' `elem` branchNames)
-- then respond DeletingCurrentBranch
-- else if null uniqueToDelete
-- then deleteBranches branchNames
-- else ifM (confirmedCommand input)
-- (deleteBranches branchNames)
-- (respond . DeleteBranchConfirmation $ uniqueToDelete)
PropagatePatchI patchPath scopePath -> do
patch <- getPatchAt patchPath
updated <- propagatePatch inputDescription patch (resolveToAbsolute scopePath)
unless updated (respond $ NothingToPatch patchPath scopePath)
ExecuteI main -> addRunMain main uf >>= \case
NoTermWithThatName -> do
ppe <- suffixifiedPPE (Names3.Names basicPrettyPrintNames0 mempty)
mainType <- eval RuntimeMain
respond $ NoMainFunction main ppe [mainType]
TermHasBadType ty -> do
ppe <- suffixifiedPPE (Names3.Names basicPrettyPrintNames0 mempty)
mainType <- eval RuntimeMain
respond $ BadMainFunction main ty ppe [mainType]
RunMainSuccess unisonFile -> do
ppe <- executePPE unisonFile
e <- eval $ Execute ppe unisonFile
case e of
Left e -> respond $ EvaluationFailure e
Right _ -> pure () -- TODO
IOTestI main -> do
-- todo - allow this to run tests from scratch file, using addRunMain
testType <- eval RuntimeTest
parseNames <- (`Names3.Names` mempty) <$> basicPrettyPrintNames0A
ppe <- suffixifiedPPE parseNames
-- use suffixed names for resolving the argument to display
let
oks results =
[ (r, msg)
| (r, Term.List' ts) <- results
, Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts
, cid == DD.okConstructorId && ref == DD.testResultRef ]
fails results =
[ (r, msg)
| (r, Term.List' ts) <- results
, Term.App' (Term.Constructor' ref cid) (Term.Text' msg) <- toList ts
, cid == DD.failConstructorId && ref == DD.testResultRef ]
results = Names3.lookupHQTerm main parseNames in
case toList results of
[Referent.Ref ref] -> do
typ <- loadTypeOfTerm (Referent.Ref ref)
case typ of
Just typ | Typechecker.isSubtype typ testType -> do
let a = ABT.annotation tm
tm = DD.forceTerm a a (Term.ref a ref) in do
-- v Don't cache IO tests
tm' <- eval $ Evaluate1 ppe False tm
case tm' of
Left e -> respond (EvaluationFailure e)
Right tm' ->
respond $ TestResults Output.NewlyComputed ppe True True (oks [(ref, tm')]) (fails [(ref, tm')])
_ -> respond $ NoMainFunction (HQ.toString main) ppe [testType]
_ -> respond $ NoMainFunction (HQ.toString main) ppe [testType]
-- UpdateBuiltinsI -> do
-- stepAt updateBuiltins
-- checkTodo
MergeBuiltinsI -> do
-- these were added once, but maybe they've changed and need to be
-- added again.
let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls)
(Map.fromList Builtin.builtinEffectDecls)
[Builtin.builtinTermsSrc Intrinsic]
mempty
eval $ AddDefsToCodebase uf
-- add the names; note, there are more names than definitions
-- due to builtin terms; so we don't just reuse `uf` above.
let srcb = BranchUtil.fromNames0 Builtin.names0
_ <- updateAtM (currentPath' `snoc` "builtin") $ \destb ->
eval $ Merge Branch.RegularMerge srcb destb
success
MergeIOBuiltinsI -> do
-- these were added once, but maybe they've changed and need to be
-- added again.
let uf = UF.typecheckedUnisonFile (Map.fromList Builtin.builtinDataDecls)
(Map.fromList Builtin.builtinEffectDecls)
[Builtin.builtinTermsSrc Intrinsic]
mempty
eval $ AddDefsToCodebase uf
-- these have not necessarily been added yet
eval $ AddDefsToCodebase IOSource.typecheckedFile'
-- add the names; note, there are more names than definitions
-- due to builtin terms; so we don't just reuse `uf` above.
let names0 = Builtin.names0
<> UF.typecheckedToNames0 @v IOSource.typecheckedFile'
let srcb = BranchUtil.fromNames0 names0
_ <- updateAtM (currentPath' `snoc` "builtin") $ \destb ->
eval $ Merge Branch.RegularMerge srcb destb
success
ListEditsI maybePath -> do
let (p, seg) =
maybe (Path.toAbsoluteSplit currentPath' defaultPatchPath)
(Path.toAbsoluteSplit currentPath')
maybePath
patch <- eval . Eval . Branch.getPatch seg . Branch.head =<< getAt p
ppe <- suffixifiedPPE =<<
makePrintNamesFromLabeled' (Patch.labeledDependencies patch)
respond $ ListEdits patch ppe
PullRemoteBranchI mayRepo path syncMode -> unlessError do
ns <- maybe (writePathToRead <$> resolveConfiguredGitUrl Pull path) pure mayRepo
lift $ unlessGitError do
b <- importRemoteBranch ns syncMode
let msg = Just $ PullAlreadyUpToDate ns path
let destAbs = resolveToAbsolute path
lift $ mergeBranchAndPropagateDefaultPatch Branch.RegularMerge inputDescription msg b (Just path) destAbs
PushRemoteBranchI mayRepo path syncMode -> do
let srcAbs = resolveToAbsolute path
srcb <- getAt srcAbs
unlessError do
(repo, remotePath) <- maybe (resolveConfiguredGitUrl Push path) pure mayRepo
lift $ unlessGitError do
(cleanup, remoteRoot) <- unsafeTime "Push viewRemoteBranch" $
viewRemoteBranch (writeToRead repo, Nothing, Path.empty)
-- We don't merge `srcb` with the remote namespace, `r`, we just
-- replace it. The push will be rejected if this rewinds time
-- or misses any new updates in `r` that aren't in `srcb` already.
let newRemoteRoot = Branch.modifyAt remotePath (const srcb) remoteRoot
unsafeTime "Push syncRemoteRootBranch" $
syncRemoteRootBranch repo newRemoteRoot syncMode
lift . eval $ Eval cleanup
lift $ respond Success
ListDependentsI hq -> -- todo: add flag to handle transitive efficiently
resolveHQToLabeledDependencies hq >>= \lds ->
if null lds
then respond $ LabeledReferenceNotFound hq
else for_ lds $ \ld -> do
dependents <- let
tp r = eval $ GetDependents r
tm (Referent.Ref r) = eval $ GetDependents r
tm (Referent.Con r _i _ct) = eval $ GetDependents r
in LD.fold tp tm ld
(missing, names0) <- eval . Eval $ Branch.findHistoricalRefs' dependents root'
let types = R.toList $ Names3.types0 names0
let terms = fmap (second Referent.toReference) $ R.toList $ Names.terms names0
let names = types <> terms
numberedArgs .= fmap (Text.unpack . Reference.toText) ((fmap snd names) <> toList missing)
respond $ ListDependents hqLength ld names missing
ListDependenciesI hq -> -- todo: add flag to handle transitive efficiently
resolveHQToLabeledDependencies hq >>= \lds ->
if null lds
then respond $ LabeledReferenceNotFound hq
else for_ lds $ \ld -> do
dependencies :: Set Reference <- let
tp r@(Reference.DerivedId i) = eval (LoadType i) <&> \case
Nothing -> error $ "What happened to " ++ show i ++ "?"
Just decl -> Set.delete r . DD.dependencies $ DD.asDataDecl decl
tp _ = pure mempty
tm (Referent.Ref r@(Reference.DerivedId i)) = eval (LoadTerm i) <&> \case
Nothing -> error $ "What happened to " ++ show i ++ "?"
Just tm -> Set.delete r $ Term.dependencies tm
tm con@(Referent.Con (Reference.DerivedId i) cid _ct) = eval (LoadType i) <&> \case
Nothing -> error $ "What happened to " ++ show i ++ "?"
Just decl -> case DD.typeOfConstructor (DD.asDataDecl decl) cid of
Nothing -> error $ "What happened to " ++ show con ++ "?"
Just tp -> Type.dependencies tp
tm _ = pure mempty
in LD.fold tp tm ld
(missing, names0) <- eval . Eval $ Branch.findHistoricalRefs' dependencies root'
let types = R.toList $ Names3.types0 names0
let terms = fmap (second Referent.toReference) $ R.toList $ Names.terms names0
let names = types <> terms
numberedArgs .= fmap (Text.unpack . Reference.toText) ((fmap snd names) <> toList missing)
respond $ ListDependencies hqLength ld names missing
DebugNumberedArgsI -> use numberedArgs >>= respond . DumpNumberedArgs
DebugBranchHistoryI ->
eval . Notify . DumpBitBooster (Branch.headHash currentBranch') =<<
(eval . Eval $ Causal.hashToRaw (Branch._history currentBranch'))
DebugTypecheckedUnisonFileI -> case uf of
Nothing -> respond NoUnisonFile
Just uf -> let
datas, effects, terms :: [(Name, Reference.Id)]
datas = [ (Name.fromVar v, r) | (v, (r, _d)) <- Map.toList $ UF.dataDeclarationsId' uf ]
effects = [ (Name.fromVar v, r) | (v, (r, _e)) <- Map.toList $ UF.effectDeclarationsId' uf ]
terms = [ (Name.fromVar v, r) | (v, (r, _tm, _tp)) <- Map.toList $ UF.hashTermsId uf ]
in eval . Notify $ DumpUnisonFileHashes hqLength datas effects terms
DebugDumpNamespacesI -> do
let seen h = State.gets (Set.member h)
set h = State.modify (Set.insert h)
getCausal b = (Branch.headHash b, pure $ Branch._history b)
goCausal :: forall m. Monad m => [(Branch.Hash, m (Branch.UnwrappedBranch m))] -> StateT (Set Branch.Hash) m ()
goCausal [] = pure ()
goCausal ((h, mc) : queue) = do
ifM (seen h) (goCausal queue) do
lift mc >>= \case
Causal.One h b -> goBranch h b mempty queue
Causal.Cons h b tail -> goBranch h b [fst tail] (tail : queue)
Causal.Merge h b (Map.toList -> tails) -> goBranch h b (map fst tails) (tails ++ queue)
goBranch :: forall m. Monad m => Branch.Hash -> Branch0 m -> [Branch.Hash] -> [(Branch.Hash, m (Branch.UnwrappedBranch m))] -> StateT (Set Branch.Hash) m ()
goBranch h b (Set.fromList -> causalParents) queue = case b of
Branch0 terms0 types0 children0 patches0 _ _ _ _ _ _ -> let
wrangleMetadata :: (Ord r, Ord n) => Metadata.Star r n -> r -> (r, (Set n, Set Metadata.Value))
wrangleMetadata s r =
(r, (R.lookupDom r $ Star3.d1 s, Set.map snd . R.lookupDom r $ Star3.d3 s))
terms = Map.fromList . map (wrangleMetadata terms0) . Foldable.toList $ Star3.fact terms0
types = Map.fromList . map (wrangleMetadata types0) . Foldable.toList $ Star3.fact types0
patches = fmap fst patches0
children = fmap Branch.headHash children0
in do
let d = Output.DN.DumpNamespace terms types patches children causalParents
-- the alternate implementation that doesn't rely on `traceM` blows up
traceM $ P.toPlain 200 (prettyDump (h, d))
set h
goCausal (map getCausal (Foldable.toList children0) ++ queue)
prettyDump (h, Output.DN.DumpNamespace terms types patches children causalParents) =
P.lit "Namespace " <> P.shown h <> P.newline <> (P.indentN 2 $ P.linesNonEmpty [
Monoid.unlessM (null causalParents) $ P.lit "Causal Parents:" <> P.newline <> P.indentN 2 (P.lines (map P.shown $ Set.toList causalParents))
, Monoid.unlessM (null terms) $ P.lit "Terms:" <> P.newline <> P.indentN 2 (P.lines (map (prettyDefn Referent.toText) $ Map.toList terms))
, Monoid.unlessM (null types) $ P.lit "Types:" <> P.newline <> P.indentN 2 (P.lines (map (prettyDefn Reference.toText) $ Map.toList types))
, Monoid.unlessM (null patches) $ P.lit "Patches:" <> P.newline <> P.indentN 2 (P.column2 (map (bimap P.shown P.shown) $ Map.toList patches))
, Monoid.unlessM (null children) $ P.lit "Children:" <> P.newline <> P.indentN 2 (P.column2 (map (bimap P.shown P.shown) $ Map.toList children))
])
where
prettyLinks renderR r [] = P.indentN 2 $ P.text (renderR r)
prettyLinks renderR r links = P.indentN 2 (P.lines (P.text (renderR r) : (links <&> \r -> "+ " <> P.text (Reference.toText r))))
prettyDefn renderR (r, (Foldable.toList -> names, Foldable.toList -> links)) =
P.lines (P.shown <$> if null names then [NameSegment "<unnamed>"] else names) <> P.newline <> prettyLinks renderR r links
void . eval . Eval . flip State.execStateT mempty $ goCausal [getCausal root']
DebugDumpNamespaceSimpleI -> do
for_ (Relation.toList . Branch.deepTypes . Branch.head $ root') \(r, name) ->
traceM $ show name ++ ",Type," ++ Text.unpack (Reference.toText r)
for_ (Relation.toList . Branch.deepTerms . Branch.head $ root') \(r, name) ->
traceM $ show name ++ ",Term," ++ Text.unpack (Referent.toText r)
DebugClearWatchI {} -> eval ClearWatchCache
DeprecateTermI {} -> notImplemented
DeprecateTypeI {} -> notImplemented
RemoveTermReplacementI from patchPath ->
doRemoveReplacement from patchPath True
RemoveTypeReplacementI from patchPath ->
doRemoveReplacement from patchPath False
ShowDefinitionByPrefixI {} -> notImplemented
UpdateBuiltinsI -> notImplemented
QuitI -> MaybeT $ pure Nothing
where
notImplemented = eval $ Notify NotImplemented
success = respond Success
resolveDefaultMetadata :: Path.Absolute -> Action' m v [String]
resolveDefaultMetadata path = do
let superpaths = Path.ancestors path
xs <- for
superpaths
(\path -> do
mayNames <-
eval . ConfigLookup @[String] $ configKey "DefaultMetadata" path
pure . join $ toList mayNames
)
pure . join $ toList xs
configKey k p =
Text.intercalate "." . toList $ k :<| fmap
NameSegment.toText
(Path.toSeq $ Path.unabsolute p)
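    -- e.g. (illustrative): for the namespace path .foo.bar,
    -- configKey "DefaultMetadata" yields the key "DefaultMetadata.foo.bar".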
    -- Loads a remote repo location for the given destination path from
    -- .unisonConfig, complaining if the config key is missing or its value
    -- does not parse. Callers use this as the fallback when no repo was
    -- given explicitly.
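    -- Sketch (illustrative): with destination .foo.bar and no explicit repo,
    -- this looks up the "GitUrl.foo.bar" key in .unisonConfig and parses its
    -- value with UriParser.writeRepoPath; a missing key produces
    -- NoConfiguredGitUrl and an unparseable value produces
    -- ConfiguredGitUrlParseError.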
resolveConfiguredGitUrl
:: PushPull
-> Path'
-> ExceptT (Output v) (Action' m v) WriteRemotePath
resolveConfiguredGitUrl pushPull destPath' = ExceptT do
let destPath = resolveToAbsolute destPath'
let configKey = gitUrlKey destPath
(eval . ConfigLookup) configKey >>= \case
Just url ->
case P.parse UriParser.writeRepoPath (Text.unpack configKey) url of
Left e ->
pure . Left $
ConfiguredGitUrlParseError pushPull destPath' url (show e)
Right ns ->
pure . Right $ ns
Nothing ->
pure . Left $ NoConfiguredGitUrl pushPull destPath'
gitUrlKey = configKey "GitUrl"
case e of
Right input -> lastInput .= Just input
_ -> pure ()
-- todo: compare to `getHQTerms` / `getHQTypes`. Is one universally better?
resolveHQToLabeledDependencies :: Functor m => HQ.HashQualified Name -> Action' m v (Set LabeledDependency)
resolveHQToLabeledDependencies = \case
HQ.NameOnly n -> do
parseNames <- basicParseNames0
let terms, types :: Set LabeledDependency
terms = Set.map LD.referent . Name.searchBySuffix n $ Names3.terms0 parseNames
types = Set.map LD.typeRef . Name.searchBySuffix n $ Names3.types0 parseNames
pure $ terms <> types
-- rationale: the hash should be unique enough that the name never helps
HQ.HashQualified _n sh -> resolveHashOnly sh
HQ.HashOnly sh -> resolveHashOnly sh
where
resolveHashOnly sh = do
terms <- eval $ TermReferentsByShortHash sh
types <- eval $ TypeReferencesByShortHash sh
pure $ Set.map LD.referent terms <> Set.map LD.typeRef types
doDisplay :: Var v => OutputLocation -> Names -> Term v () -> Action' m v ()
doDisplay outputLoc names tm = do
ppe <- prettyPrintEnvDecl names
tf <- use latestTypecheckedFile
let (tms, typs) = maybe mempty UF.indexByReference tf
latestFile' <- use latestFile
let
loc = case outputLoc of
ConsoleLocation -> Nothing
FileLocation path -> Just path
LatestFileLocation -> fmap fst latestFile' <|> Just "scratch.u"
useCache = True
evalTerm tm = fmap ErrorUtil.hush . fmap (fmap Term.unannotate) . eval $
Evaluate1 (PPE.suffixifiedPPE ppe) useCache (Term.amap (const External) tm)
loadTerm (Reference.DerivedId r) = case Map.lookup r tms of
Nothing -> fmap (fmap Term.unannotate) . eval $ LoadTerm r
Just (tm,_) -> pure (Just $ Term.unannotate tm)
loadTerm _ = pure Nothing
loadDecl (Reference.DerivedId r) = case Map.lookup r typs of
Nothing -> fmap (fmap $ DD.amap (const ())) . eval $ LoadType r
Just decl -> pure (Just $ DD.amap (const ()) decl)
loadDecl _ = pure Nothing
loadTypeOfTerm' (Referent.Ref (Reference.DerivedId r))
| Just (_,ty) <- Map.lookup r tms = pure $ Just (void ty)
loadTypeOfTerm' r = fmap (fmap void) . loadTypeOfTerm $ r
rendered <- DisplayValues.displayTerm ppe loadTerm loadTypeOfTerm' evalTerm loadDecl tm
respond $ DisplayRendered loc rendered
getLinks :: (Var v, Monad m)
=> Input
-> Path.HQSplit'
-> Either (Set Reference) (Maybe String)
-> ExceptT (Output v)
(Action' m v)
(PPE.PrettyPrintEnv,
              -- e.g. ("Foo.doc", #foodoc, Just (#builtin.Doc))
[(HQ.HashQualified Name, Reference, Maybe (Type v Ann))])
getLinks input src mdTypeStr = ExceptT $ do
let go = fmap Right . getLinks' src
case mdTypeStr of
Left s -> go (Just s)
Right Nothing -> go Nothing
Right (Just mdTypeStr) -> parseType input mdTypeStr >>= \case
Left e -> pure $ Left e
Right typ -> go . Just . Set.singleton $ Type.toReference typ
getLinks' :: (Var v, Monad m)
=> Path.HQSplit' -- definition to print metadata of
-> Maybe (Set Reference) -- return all metadata if empty
-> Action' m v (PPE.PrettyPrintEnv,
       -- e.g. ("Foo.doc", #foodoc, Just (#builtin.Doc))
[(HQ.HashQualified Name, Reference, Maybe (Type v Ann))])
getLinks' src selection0 = do
root0 <- Branch.head <$> use root
currentPath' <- use currentPath
let resolveSplit' = Path.fromAbsoluteSplit . Path.toAbsoluteSplit currentPath'
p = resolveSplit' src -- ex: the (parent,hqsegment) of `List.map` - `List`
-- all metadata (type+value) associated with name `src`
allMd = R4.d34 (BranchUtil.getTermMetadataHQNamed p root0)
<> R4.d34 (BranchUtil.getTypeMetadataHQNamed p root0)
allMd' = maybe allMd (`R.restrictDom` allMd) selection0
-- then list the values after filtering by type
allRefs :: Set Reference = R.ran allMd'
sigs <- for (toList allRefs) (loadTypeOfTerm . Referent.Ref)
let deps = Set.map LD.termRef allRefs <>
Set.unions [ Set.map LD.typeRef . Type.dependencies $ t | Just t <- sigs ]
ppe <- prettyPrintEnvDecl =<< makePrintNamesFromLabeled' deps
let ppeDecl = PPE.unsuffixifiedPPE ppe
let sortedSigs = sortOn snd (toList allRefs `zip` sigs)
let out = [(PPE.termName ppeDecl (Referent.Ref r), r, t) | (r, t) <- sortedSigs ]
pure (PPE.suffixifiedPPE ppe, out)
resolveShortBranchHash ::
ShortBranchHash -> ExceptT (Output v) (Action' m v) (Branch m)
resolveShortBranchHash hash = ExceptT do
hashSet <- eval $ BranchHashesByPrefix hash
len <- eval BranchHashLength
case Set.toList hashSet of
[] -> pure . Left $ NoBranchWithHash hash
[h] -> fmap Right . eval $ LoadLocalBranch h
_ -> pure . Left $ BranchHashAmbiguous hash (Set.map (SBH.fromHash len) hashSet)
-- Returns True if the operation changed the namespace, False otherwise.
propagatePatchNoSync
:: (Monad m, Var v)
=> Patch
-> Path.Absolute
-> Action' m v Bool
propagatePatchNoSync patch scopePath = do
r <- use root
let nroot = Branch.toNames0 (Branch.head r)
stepAtMNoSync' (Path.unabsolute scopePath,
lift . lift . Propagate.propagateAndApply nroot patch)
-- Returns True if the operation changed the namespace, False otherwise.
propagatePatch :: (Monad m, Var v) =>
InputDescription -> Patch -> Path.Absolute -> Action' m v Bool
propagatePatch inputDescription patch scopePath = do
r <- use root
let nroot = Branch.toNames0 (Branch.head r)
stepAtM' (inputDescription <> " (applying patch)")
(Path.unabsolute scopePath,
lift . lift . Propagate.propagateAndApply nroot patch)
-- | Create the args needed for showTodoOutput and call it
doShowTodoOutput :: Monad m => Patch -> Path.Absolute -> Action' m v ()
doShowTodoOutput patch scopePath = do
scope <- getAt scopePath
let names0 = Branch.toNames0 (Branch.head scope)
-- only needs the local references to check for obsolete defs
let getPpe = do
names <- makePrintNamesFromLabeled' (Patch.labeledDependencies patch)
prettyPrintEnvDecl names
showTodoOutput getPpe patch names0
-- | Show todo output if there are any conflicts or edits.
showTodoOutput
:: Action' m v PPE.PrettyPrintEnvDecl
-- ^ Action that fetches the pretty print env. It's expensive because it
-- involves looking up historical names, so only call it if necessary.
-> Patch
-> Names0
-> Action' m v ()
showTodoOutput getPpe patch names0 = do
todo <- checkTodo patch names0
if TO.noConflicts todo && TO.noEdits todo
then respond NoConflictsOrEdits
else do
numberedArgs .=
(Text.unpack . Reference.toText . view _2 <$>
fst (TO.todoFrontierDependents todo))
ppe <- getPpe
respond $ TodoOutput ppe todo
checkTodo :: Patch -> Names0 -> Action m i v (TO.TodoOutput v Ann)
checkTodo patch names0 = do
f <- computeFrontier (eval . GetDependents) patch names0
let dirty = R.dom f
frontier = R.ran f
(frontierTerms, frontierTypes) <- loadDisplayInfo frontier
(dirtyTerms, dirtyTypes) <- loadDisplayInfo dirty
-- todo: something more intelligent here?
let scoreFn = const 1
remainingTransitive <-
frontierTransitiveDependents (eval . GetDependents) names0 frontier
let
scoredDirtyTerms =
List.sortOn (view _1) [ (scoreFn r, r, t) | (r,t) <- dirtyTerms ]
scoredDirtyTypes =
List.sortOn (view _1) [ (scoreFn r, r, t) | (r,t) <- dirtyTypes ]
pure $
TO.TodoOutput
(Set.size remainingTransitive)
(frontierTerms, frontierTypes)
(scoredDirtyTerms, scoredDirtyTypes)
(Names.conflicts names0)
(Patch.conflicts patch)
where
frontierTransitiveDependents ::
Monad m => (Reference -> m (Set Reference)) -> Names0 -> Set Reference -> m (Set Reference)
frontierTransitiveDependents dependents names0 rs = do
let branchDependents r = Set.filter (Names.contains names0) <$> dependents r
tdeps <- transitiveClosure branchDependents rs
-- we don't want the frontier in the result
pure $ tdeps `Set.difference` rs
-- (d, f) when d is "dirty" (needs update),
-- f is in the frontier (an edited dependency of d),
-- and d depends on f
-- a ⋖ b = a depends directly on b
-- dirty(d) ∧ frontier(f) <=> not(edited(d)) ∧ edited(f) ∧ d ⋖ f
--
-- The range of this relation is the frontier, and the domain is
-- the set of dirty references.
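  -- Sketch (hypothetical references): if the patch edits #f, and the names
  -- also contain an unedited #d that depends directly on #f, then the result
  -- contains the pair (#d, #f): #d is dirty and #f is on the frontier.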
computeFrontier :: forall m . Monad m
=> (Reference -> m (Set Reference)) -- eg Codebase.dependents codebase
-> Patch
-> Names0
-> m (R.Relation Reference Reference)
computeFrontier getDependents patch names = let
edited :: Set Reference
edited = R.dom (Patch._termEdits patch) <> R.dom (Patch._typeEdits patch)
addDependents :: R.Relation Reference Reference -> Reference -> m (R.Relation Reference Reference)
addDependents dependents ref =
(\ds -> R.insertManyDom ds ref dependents) . Set.filter (Names.contains names)
<$> getDependents ref
in do
-- (r,r2) ∈ dependsOn if r depends on r2
dependsOn <- foldM addDependents R.empty edited
-- Dirty is everything that `dependsOn` Frontier, minus already edited defns
pure $ R.filterDom (not . flip Set.member edited) dependsOn
eval :: Command m i v a -> Action m i v a
eval = lift . lift . Free.eval
confirmedCommand :: Input -> Action m i v Bool
confirmedCommand i = do
i0 <- use lastInput
pure $ Just i == i0
listBranch :: Branch0 m -> [SearchResult]
listBranch (Branch.toNames0 -> b) =
List.sortOn (\s -> (SR.name s, s)) (SR.fromNames b)
-- | restores the full hash to these search results, for _numberedArgs purposes
searchResultToHQString :: SearchResult -> String
searchResultToHQString = \case
SR.Tm' n r _ -> HQ'.toString $ HQ'.requalify n r
SR.Tp' n r _ -> HQ'.toString $ HQ'.requalify n (Referent.Ref r)
_ -> error "unpossible match failure"
  -- Fuzzy-match score between a query name and a candidate name; `Nothing` if no match.
fuzzyNameDistance :: Name -> Name -> Maybe Int
fuzzyNameDistance (Name.toString -> q) (Name.toString -> n) =
Find.simpleFuzzyScore q n
-- return `name` and `name.<everything>...`
_searchBranchPrefix :: Branch m -> Name -> [SearchResult]
_searchBranchPrefix b n = case Path.unsnoc (Path.fromName n) of
Nothing -> []
Just (init, last) -> case Branch.getAt init b of
Nothing -> []
Just b -> SR.fromNames . Names.prefix0 n $ names0
where
lastName = Path.toName (Path.singleton last)
subnames = Branch.toNames0 . Branch.head $
Branch.getAt' (Path.singleton last) b
rootnames =
Names.filter (== lastName) .
Branch.toNames0 . set Branch.children mempty $ Branch.head b
names0 = rootnames <> Names.prefix0 lastName subnames
searchResultsFor :: Names0 -> [Referent] -> [Reference] -> [SearchResult]
searchResultsFor ns terms types =
[ SR.termSearchResult ns name ref
| ref <- terms
, name <- toList (Names.namesForReferent ns ref)
] <>
[ SR.typeSearchResult ns name ref
| ref <- types
, name <- toList (Names.namesForReference ns ref)
]
searchBranchScored :: forall score. (Ord score)
=> Names0
-> (Name -> Name -> Maybe score)
-> [HQ.HashQualified Name]
-> [SearchResult]
searchBranchScored names0 score queries =
nubOrd . fmap snd . toList $ searchTermNamespace <> searchTypeNamespace
where
searchTermNamespace = foldMap do1query queries
where
do1query :: HQ.HashQualified Name -> Set (Maybe score, SearchResult)
do1query q = foldMap (score1hq q) (R.toList . Names.terms $ names0)
score1hq :: HQ.HashQualified Name -> (Name, Referent) -> Set (Maybe score, SearchResult)
score1hq query (name, ref) = case query of
HQ.NameOnly qn ->
pair qn
HQ.HashQualified qn h | h `SH.isPrefixOf` Referent.toShortHash ref ->
pair qn
HQ.HashOnly h | h `SH.isPrefixOf` Referent.toShortHash ref ->
Set.singleton (Nothing, result)
_ -> mempty
where
result = SR.termSearchResult names0 name ref
pair qn = case score qn name of
Just score -> Set.singleton (Just score, result)
Nothing -> mempty
searchTypeNamespace = foldMap do1query queries
where
do1query :: HQ.HashQualified Name -> Set (Maybe score, SearchResult)
do1query q = foldMap (score1hq q) (R.toList . Names.types $ names0)
score1hq :: HQ.HashQualified Name -> (Name, Reference) -> Set (Maybe score, SearchResult)
score1hq query (name, ref) = case query of
HQ.NameOnly qn ->
pair qn
HQ.HashQualified qn h | h `SH.isPrefixOf` Reference.toShortHash ref ->
pair qn
HQ.HashOnly h | h `SH.isPrefixOf` Reference.toShortHash ref ->
Set.singleton (Nothing, result)
_ -> mempty
where
result = SR.typeSearchResult names0 name ref
pair qn = case score qn name of
Just score -> Set.singleton (Just score, result)
Nothing -> mempty
handleBackendError :: Backend.BackendError -> Action m i v ()
handleBackendError = \case
Backend.NoSuchNamespace path ->
respond . BranchNotFound $ Path.absoluteToPath' path
Backend.BadRootBranch e -> respond $ BadRootBranch e
Backend.NoBranchForHash h -> do
sbhLength <- eval BranchHashLength
respond . NoBranchWithHash $ SBH.fromHash sbhLength h
Backend.CouldntLoadBranch h -> do
respond . CouldntLoadBranch $ h
Backend.CouldntExpandBranchHash sbh -> respond $ NoBranchWithHash sbh
Backend.AmbiguousBranchHash h hashes ->
respond $ BranchHashAmbiguous h hashes
Backend.MissingSignatureForTerm r ->
respond $ TermMissingType r
respond :: Output v -> Action m i v ()
respond output = eval $ Notify output
respondNumbered :: NumberedOutput v -> Action m i v ()
respondNumbered output = do
args <- eval $ NotifyNumbered output
unless (null args) $
numberedArgs .= toList args
unlessError :: ExceptT (Output v) (Action' m v) () -> Action' m v ()
unlessError ma = runExceptT ma >>= either (eval . Notify) pure
unlessError' :: (e -> Output v) -> ExceptT e (Action' m v) () -> Action' m v ()
unlessError' f ma = unlessError $ withExceptT f ma
  -- | Merge a branch into `dest` and propagate the default patch.
  -- Supply `dest0` if you want diff messages printed;
  -- supply `unchangedMessage` if you want it displayed when the merge had no effect.
mergeBranchAndPropagateDefaultPatch :: (Monad m, Var v) => Branch.MergeMode ->
InputDescription -> Maybe (Output v) -> Branch m -> Maybe Path.Path' -> Path.Absolute -> Action' m v ()
mergeBranchAndPropagateDefaultPatch mode inputDescription unchangedMessage srcb dest0 dest =
ifM (mergeBranch mode inputDescription srcb dest0 dest)
(loadPropagateDiffDefaultPatch inputDescription dest0 dest)
(for_ unchangedMessage respond)
where
mergeBranch :: (Monad m, Var v) =>
Branch.MergeMode -> InputDescription -> Branch m -> Maybe Path.Path' -> Path.Absolute -> Action' m v Bool
mergeBranch mode inputDescription srcb dest0 dest = unsafeTime "Merge Branch" $ do
destb <- getAt dest
merged <- eval $ Merge mode srcb destb
b <- updateAtM inputDescription dest (const $ pure merged)
for_ dest0 $ \dest0 ->
diffHelper (Branch.head destb) (Branch.head merged) >>=
respondNumbered . uncurry (ShowDiffAfterMerge dest0 dest)
pure b
loadPropagateDiffDefaultPatch :: (Monad m, Var v) =>
InputDescription -> Maybe Path.Path' -> Path.Absolute -> Action' m v ()
loadPropagateDiffDefaultPatch inputDescription dest0 dest = unsafeTime "Propagate Default Patch" $ do
original <- getAt dest
patch <- eval . Eval $ Branch.getPatch defaultPatchNameSegment (Branch.head original)
patchDidChange <- propagatePatch inputDescription patch dest
when patchDidChange . for_ dest0 $ \dest0 -> do
patched <- getAt dest
let patchPath = snoc dest0 defaultPatchNameSegment
diffHelper (Branch.head original) (Branch.head patched) >>=
respondNumbered . uncurry (ShowDiffAfterMergePropagate dest0 dest patchPath)
getAt :: Functor m => Path.Absolute -> Action m i v (Branch m)
getAt (Path.Absolute p) =
use root <&> fromMaybe Branch.empty . Branch.getAt p
-- Update a branch at the given path, returning `True` if
-- an update occurred and false otherwise
updateAtM :: Applicative m
=> InputDescription
-> Path.Absolute
-> (Branch m -> Action m i v (Branch m))
-> Action m i v Bool
updateAtM reason (Path.Absolute p) f = do
b <- use lastSavedRoot
b' <- Branch.modifyAtM p f b
updateRoot b' reason
pure $ b /= b'
stepAt
:: forall m i v
. Monad m
=> InputDescription
-> (Path, Branch0 m -> Branch0 m)
-> Action m i v ()
stepAt cause = stepManyAt @m @[] cause . pure
stepAtNoSync :: forall m i v. Monad m
=> (Path, Branch0 m -> Branch0 m)
-> Action m i v ()
stepAtNoSync = stepManyAtNoSync @m @[] . pure
stepAtM :: forall m i v. Monad m
=> InputDescription
-> (Path, Branch0 m -> m (Branch0 m))
-> Action m i v ()
stepAtM cause = stepManyAtM @m @[] cause . pure
stepAtM'
:: forall m i v
. Monad m
=> InputDescription
-> (Path, Branch0 m -> Action m i v (Branch0 m))
-> Action m i v Bool
stepAtM' cause = stepManyAtM' @m @[] cause . pure
stepAtMNoSync'
:: forall m i v
. Monad m
=> (Path, Branch0 m -> Action m i v (Branch0 m))
-> Action m i v Bool
stepAtMNoSync' = stepManyAtMNoSync' @m @[] . pure
stepManyAt
:: (Monad m, Foldable f)
=> InputDescription
-> f (Path, Branch0 m -> Branch0 m)
-> Action m i v ()
stepManyAt reason actions = do
stepManyAtNoSync actions
b <- use root
updateRoot b reason
-- Like stepManyAt, but doesn't update the root
stepManyAtNoSync
:: (Monad m, Foldable f)
=> f (Path, Branch0 m -> Branch0 m)
-> Action m i v ()
stepManyAtNoSync actions = do
b <- use root
let new = Branch.stepManyAt actions b
root .= new
stepManyAtM :: (Monad m, Foldable f)
=> InputDescription
-> f (Path, Branch0 m -> m (Branch0 m))
-> Action m i v ()
stepManyAtM reason actions = do
stepManyAtMNoSync actions
b <- use root
updateRoot b reason
stepManyAtMNoSync :: (Monad m, Foldable f)
=> f (Path, Branch0 m -> m (Branch0 m))
-> Action m i v ()
stepManyAtMNoSync actions = do
b <- use root
b' <- eval . Eval $ Branch.stepManyAtM actions b
root .= b'
stepManyAtM' :: (Monad m, Foldable f)
=> InputDescription
-> f (Path, Branch0 m -> Action m i v (Branch0 m))
-> Action m i v Bool
stepManyAtM' reason actions = do
b <- use root
b' <- Branch.stepManyAtM actions b
updateRoot b' reason
pure (b /= b')
stepManyAtMNoSync' :: (Monad m, Foldable f)
=> f (Path, Branch0 m -> Action m i v (Branch0 m))
-> Action m i v Bool
stepManyAtMNoSync' actions = do
b <- use root
b' <- Branch.stepManyAtM actions b
root .= b'
pure (b /= b')
updateRoot :: Branch m -> InputDescription -> Action m i v ()
updateRoot new reason = do
old <- use lastSavedRoot
when (old /= new) $ do
root .= new
eval $ SyncLocalRootBranch new
eval $ AppendToReflog reason old new
lastSavedRoot .= new
-- cata for 0, 1, or more elements of a Foldable
-- tries to match as lazily as possible
zeroOneOrMore :: Foldable f => f a -> b -> (a -> b) -> (f a -> b) -> b
zeroOneOrMore f zero one more = case toList f of
_ : _ : _ -> more f
a : _ -> one a
_ -> zero
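  -- For example (illustrative, not a doctest):
  --   zeroOneOrMore ([] :: [Int]) "zero" show (const "many") == "zero"
  --   zeroOneOrMore [1]           "zero" show (const "many") == "1"
  --   zeroOneOrMore [1,2]         "zero" show (const "many") == "many"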
-- Goal: If `remaining = root - toBeDeleted` contains definitions X which
-- depend on definitions Y not in `remaining` (which should also be in
-- `toBeDeleted`), then complain by returning (Y, X).
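  -- Worked sketch (hypothetical names): deleting `b` while a surviving `a`
  -- still depends on `b` yields ({b}, {a}), so the caller can refuse the
  -- delete; if nothing kept depends on anything deleted, both returned
  -- Names0 are empty and the delete can proceed.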
getEndangeredDependents :: forall m. Monad m
=> (Reference -> m (Set Reference))
-> Names0
-> Names0
-> m (Names0, Names0)
getEndangeredDependents getDependents toDelete root = do
let remaining = root `Names.difference` toDelete
toDelete', remaining', extinct :: Set Reference
toDelete' = Names.allReferences toDelete
remaining' = Names.allReferences remaining -- left over after delete
extinct = toDelete' `Set.difference` remaining' -- deleting and not left over
accumulateDependents m r = getDependents r <&> \ds -> Map.insert r ds m
dependentsOfExtinct :: Map Reference (Set Reference) <-
foldM accumulateDependents mempty extinct
let orphaned, endangered, failed :: Set Reference
orphaned = fold dependentsOfExtinct
endangered = orphaned `Set.intersection` remaining'
failed = Set.filter hasEndangeredDependent extinct
hasEndangeredDependent r = any (`Set.member` endangered)
(dependentsOfExtinct Map.! r)
pure ( Names.restrictReferences failed toDelete
, Names.restrictReferences endangered root `Names.difference` toDelete)
-- Applies the selection filter to the adds/updates of a slurp result,
-- meaning that adds/updates should only contain the selection or its transitive
-- dependencies, any unselected transitive dependencies of the selection will
-- be added to `extraDefinitions`.
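  -- Sketch (hypothetical names): selecting just `foo` when the file also
  -- defines a new helper `bar` that `foo` uses keeps both in adds/updates
  -- (the closure of the selection) and records `bar` in `extraDefinitions`;
  -- unrelated definitions in the file are dropped from adds/updates.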
applySelection
:: forall v a
. Var v
=> [HQ'.HashQualified Name]
-> UF.TypecheckedUnisonFile v a
-> SlurpResult v
-> SlurpResult v
applySelection [] _ = id
applySelection hqs file = \sr@SlurpResult{..} ->
sr { adds = adds `SC.intersection` closed
, updates = updates `SC.intersection` closed
, extraDefinitions = closed `SC.difference` selection
}
where
selectedNames0 =
Names.filterByHQs (Set.fromList hqs) (UF.typecheckedToNames0 file)
selection, closed :: SlurpComponent v
selection = SlurpComponent selectedTypes selectedTerms
closed = SC.closeWithDependencies file selection
selectedTypes, selectedTerms :: Set v
selectedTypes = Set.map var $ R.dom (Names.types selectedNames0)
selectedTerms = Set.map var $ R.dom (Names.terms selectedNames0)
var :: Var v => Name -> v
var name = Var.named (Name.toText name)
toSlurpResult
:: forall v
. Var v
=> Path.Absolute
-> UF.TypecheckedUnisonFile v Ann
-> Names0
-> SlurpResult v
toSlurpResult currentPath uf existingNames =
Slurp.subtractComponent (conflicts <> ctorCollisions) $ SlurpResult
uf
mempty
adds
dups
mempty
conflicts
updates
termCtorCollisions
ctorTermCollisions
termAliases
typeAliases
mempty
where
fileNames0 = UF.typecheckedToNames0 uf
sc :: R.Relation Name Referent -> R.Relation Name Reference -> SlurpComponent v
sc terms types = SlurpComponent { terms = Set.map var (R.dom terms)
, types = Set.map var (R.dom types) }
-- conflict (n,r) if n is conflicted in names0
conflicts :: SlurpComponent v
conflicts = sc terms types where
terms = R.filterDom (conflicted . Names.termsNamed existingNames)
(Names.terms fileNames0)
types = R.filterDom (conflicted . Names.typesNamed existingNames)
(Names.types fileNames0)
conflicted s = Set.size s > 1
ctorCollisions :: SlurpComponent v
ctorCollisions =
mempty { SC.terms = termCtorCollisions <> ctorTermCollisions }
-- termCtorCollision (n,r) if (n, r' /= r) exists in existingNames and
-- r is Ref and r' is Con
termCtorCollisions :: Set v
termCtorCollisions = Set.fromList
[ var n
| (n, Referent.Ref{}) <- R.toList (Names.terms fileNames0)
, [r@Referent.Con{}] <- [toList $ Names.termsNamed existingNames n]
-- ignore collisions w/ ctors of types being updated
, Set.notMember (Referent.toReference r) typesToUpdate
]
-- the set of typerefs that are being updated by this file
typesToUpdate :: Set Reference
typesToUpdate = Set.fromList
[ r
| (n, r') <- R.toList (Names.types fileNames0)
, r <- toList (Names.typesNamed existingNames n)
, r /= r'
]
-- ctorTermCollisions (n,r) if (n, r' /= r) exists in names0 and r is Con
-- and r' is Ref except we relaxed it to where r' can be Con or Ref
-- what if (n,r) and (n,r' /= r) exists in names and r, r' are Con
ctorTermCollisions :: Set v
ctorTermCollisions = Set.fromList
[ var n
| (n, Referent.Con{}) <- R.toList (Names.terms fileNames0)
, r <- toList $ Names.termsNamed existingNames n
-- ignore collisions w/ ctors of types being updated
, Set.notMember (Referent.toReference r) typesToUpdate
, Set.notMember (var n) (terms dups)
]
-- duplicate (n,r) if (n,r) exists in names0
dups :: SlurpComponent v
dups = sc terms types where
terms = R.intersection (Names.terms existingNames) (Names.terms fileNames0)
types = R.intersection (Names.types existingNames) (Names.types fileNames0)
-- update (n,r) if (n,r' /= r) exists in existingNames and r, r' are Ref
updates :: SlurpComponent v
updates = SlurpComponent (Set.fromList types) (Set.fromList terms) where
terms =
[ var n
| (n, r'@Referent.Ref{}) <- R.toList (Names.terms fileNames0)
, [r@Referent.Ref{}] <- [toList $ Names.termsNamed existingNames n]
, r' /= r
]
types =
[ var n
| (n, r') <- R.toList (Names.types fileNames0)
, [r] <- [toList $ Names.typesNamed existingNames n]
, r' /= r
]
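  -- A hedged illustration of how these buckets relate (hypothetical name, not
  -- from the original source): if the file defines `frobnicate` and
  -- `existingNames` already maps `frobnicate` to a *different* Ref, it lands
  -- in `updates`; if it maps to the *same* Ref it is a `dup`; and if the
  -- existing name resolves to a constructor instead, it becomes a
  -- `termCtorCollision`.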
buildAliases
:: R.Relation Name Referent
-> R.Relation Name Referent
-> Set v
-> Map v Slurp.Aliases
buildAliases existingNames namesFromFile duplicates = Map.fromList
[ ( var n
, if null aliasesOfOld
then Slurp.AddAliases aliasesOfNew
else Slurp.UpdateAliases aliasesOfOld aliasesOfNew
)
| (n, r@Referent.Ref{}) <- R.toList namesFromFile
-- All the refs whose names include `n`, and are not `r`
, let
refs = Set.delete r $ R.lookupDom n existingNames
aliasesOfNew =
Set.map (Path.unprefixName currentPath) . Set.delete n $
R.lookupRan r existingNames
aliasesOfOld =
Set.map (Path.unprefixName currentPath) . Set.delete n . R.dom $
R.restrictRan existingNames refs
, not (null aliasesOfNew && null aliasesOfOld)
, Set.notMember (var n) duplicates
]
termAliases :: Map v Slurp.Aliases
termAliases = buildAliases (Names.terms existingNames)
(Names.terms fileNames0)
(SC.terms dups)
typeAliases :: Map v Slurp.Aliases
typeAliases = buildAliases (R.mapRan Referent.Ref $ Names.types existingNames)
(R.mapRan Referent.Ref $ Names.types fileNames0)
(SC.types dups)
-- (n,r) is in `adds` if n isn't in existingNames
adds = sc terms types where
terms = addTerms (Names.terms existingNames) (Names.terms fileNames0)
types = addTypes (Names.types existingNames) (Names.types fileNames0)
addTerms existingNames = R.filter go where
go (n, Referent.Ref{}) = (not . R.memberDom n) existingNames
go _ = False
addTypes existingNames = R.filter go where
go (n, _) = (not . R.memberDom n) existingNames
filterBySlurpResult :: Ord v
=> SlurpResult v
-> UF.TypecheckedUnisonFile v Ann
-> UF.TypecheckedUnisonFile v Ann
filterBySlurpResult SlurpResult{..}
(UF.TypecheckedUnisonFileId
dataDeclarations'
effectDeclarations'
topLevelComponents'
watchComponents
hashTerms) =
UF.TypecheckedUnisonFileId datas effects tlcs watches hashTerms'
where
keep = updates <> adds
keepTerms = SC.terms keep
keepTypes = SC.types keep
hashTerms' = Map.restrictKeys hashTerms keepTerms
datas = Map.restrictKeys dataDeclarations' keepTypes
effects = Map.restrictKeys effectDeclarations' keepTypes
tlcs = filter (not.null) $ fmap (List.filter filterTLC) topLevelComponents'
watches = filter (not.null.snd) $ fmap (second (List.filter filterTLC)) watchComponents
filterTLC (v,_,_) = Set.member v keepTerms
-- updates the namespace for adding `slurp`
doSlurpAdds :: forall m v. (Monad m, Var v)
=> SlurpComponent v
-> UF.TypecheckedUnisonFile v Ann
-> (Branch0 m -> Branch0 m)
doSlurpAdds slurp uf = Branch.stepManyAt0 (typeActions <> termActions)
where
typeActions = map doType . toList $ SC.types slurp
termActions = map doTerm . toList $
SC.terms slurp <> Slurp.constructorsFor (SC.types slurp) uf
names = UF.typecheckedToNames0 uf
tests = Set.fromList $ fst <$> UF.watchesOfKind UF.TestWatch (UF.discardTypes uf)
(isTestType, isTestValue) = isTest
md v =
if Set.member v tests then Metadata.singleton isTestType isTestValue
else Metadata.empty
doTerm :: v -> (Path, Branch0 m -> Branch0 m)
doTerm v = case toList (Names.termsNamed names (Name.fromVar v)) of
[] -> errorMissingVar v
[r] -> case Path.splitFromName (Name.fromVar v) of
Nothing -> errorEmptyVar
Just split -> BranchUtil.makeAddTermName split r (md v)
wha -> error $ "Unison bug, typechecked file w/ multiple terms named "
<> Var.nameStr v <> ": " <> show wha
doType :: v -> (Path, Branch0 m -> Branch0 m)
doType v = case toList (Names.typesNamed names (Name.fromVar v)) of
[] -> errorMissingVar v
[r] -> case Path.splitFromName (Name.fromVar v) of
Nothing -> errorEmptyVar
Just split -> BranchUtil.makeAddTypeName split r Metadata.empty
wha -> error $ "Unison bug, typechecked file w/ multiple types named "
<> Var.nameStr v <> ": " <> show wha
errorEmptyVar = error "encountered an empty var name"
errorMissingVar v = error $ "expected to find " ++ show v ++ " in " ++ show uf
doSlurpUpdates :: Monad m
=> Map Name (Reference, Reference)
-> Map Name (Reference, Reference)
-> [(Name, Referent)]
-> (Branch0 m -> Branch0 m)
doSlurpUpdates typeEdits termEdits deprecated b0 =
Branch.stepManyAt0 (typeActions <> termActions <> deprecateActions) b0
where
typeActions = join . map doType . Map.toList $ typeEdits
termActions = join . map doTerm . Map.toList $ termEdits
deprecateActions = join . map doDeprecate $ deprecated where
doDeprecate (n, r) = case Path.splitFromName n of
Nothing -> errorEmptyVar
Just split -> [BranchUtil.makeDeleteTermName split r]
-- we copy over the metadata on the old thing
-- todo: if the thing being updated, m, is metadata for something x in b0
-- update x's md to reference `m`
doType, doTerm ::
(Name, (Reference, Reference)) -> [(Path, Branch0 m -> Branch0 m)]
doType (n, (old, new)) = case Path.splitFromName n of
Nothing -> errorEmptyVar
Just split -> [ BranchUtil.makeDeleteTypeName split old
, BranchUtil.makeAddTypeName split new oldMd ]
where
oldMd = BranchUtil.getTypeMetadataAt split old b0
doTerm (n, (old, new)) = case Path.splitFromName n of
Nothing -> errorEmptyVar
Just split -> [ BranchUtil.makeDeleteTermName split (Referent.Ref old)
, BranchUtil.makeAddTermName split (Referent.Ref new) oldMd ]
where
-- oldMd is the metadata linked to the old definition
-- we relink it to the new definition
oldMd = BranchUtil.getTermMetadataAt split (Referent.Ref old) b0
errorEmptyVar = error "encountered an empty var name"
loadDisplayInfo ::
Set Reference -> Action m i v ([(Reference, Maybe (Type v Ann))]
,[(Reference, DisplayObject () (DD.Decl v Ann))])
loadDisplayInfo refs = do
termRefs <- filterM (eval . IsTerm) (toList refs)
typeRefs <- filterM (eval . IsType) (toList refs)
terms <- forM termRefs $ \r -> (r,) <$> eval (LoadTypeOfTerm r)
types <- forM typeRefs $ \r -> (r,) <$> loadTypeDisplayObject r
pure (terms, types)
-- Any absolute names in the input which have `currentPath` as a prefix
-- are converted to names relative to current path. all other names are
-- converted to absolute names. For example:
--
-- e.g. if currentPath = .foo.bar
-- then name foo.bar.baz becomes baz
-- name cat.dog becomes .cat.dog
fixupNamesRelative :: Path.Absolute -> Names0 -> Names0
fixupNamesRelative currentPath' = Names3.map0 fixName where
prefix = Path.toName (Path.unabsolute currentPath')
fixName n = if currentPath' == Path.absoluteEmpty then n else
fromMaybe (Name.makeAbsolute n) (Name.stripNamePrefix prefix n)
makeHistoricalParsingNames ::
Monad m => Set (HQ.HashQualified Name) -> Action' m v Names
makeHistoricalParsingNames lexedHQs = do
rawHistoricalNames <- findHistoricalHQs lexedHQs
basicNames0 <- basicParseNames0
currentPath <- use currentPath
pure $ Names basicNames0
(Names3.makeAbsolute0 rawHistoricalNames <>
fixupNamesRelative currentPath rawHistoricalNames)
loadTypeDisplayObject
:: Reference -> Action m i v (DisplayObject () (DD.Decl v Ann))
loadTypeDisplayObject = \case
Reference.Builtin _ -> pure (BuiltinObject ())
Reference.DerivedId id ->
maybe (MissingObject $ Reference.idToShortHash id) UserObject
<$> eval (LoadType id)
lexedSource :: Monad m => SourceName -> Source -> Action' m v (Names, LexedSource)
lexedSource name src = do
let tokens = L.lexer (Text.unpack name) (Text.unpack src)
getHQ = \case
L.Backticks s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh)
L.WordyId s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh)
L.SymbolyId s (Just sh) -> Just (HQ.HashQualified (Name.unsafeFromString s) sh)
L.Hash sh -> Just (HQ.HashOnly sh)
_ -> Nothing
hqs = Set.fromList . mapMaybe (getHQ . L.payload) $ tokens
parseNames <- makeHistoricalParsingNames hqs
pure (parseNames, (src, tokens))
suffixifiedPPE :: Names -> Action' m v PPE.PrettyPrintEnv
suffixifiedPPE ns = eval CodebaseHashLength <&> (`PPE.fromSuffixNames` ns)
fqnPPE :: Names -> Action' m v PPE.PrettyPrintEnv
fqnPPE ns = eval CodebaseHashLength <&> (`PPE.fromNames` ns)
parseSearchType :: (Monad m, Var v)
=> Input -> String -> Action' m v (Either (Output v) (Type v Ann))
parseSearchType input typ = fmap Type.removeAllEffectVars <$> parseType input typ
parseType :: (Monad m, Var v)
=> Input -> String -> Action' m v (Either (Output v) (Type v Ann))
parseType input src = do
-- `show Input` is the name of the "file" being lexed
(names0, lexed) <- lexedSource (Text.pack $ show input) (Text.pack src)
parseNames <- basicParseNames0
let names = Names3.push (Names3.currentNames names0)
(Names3.Names parseNames (Names3.oldNames names0))
e <- eval $ ParseType names lexed
pure $ case e of
Left err -> Left $ TypeParseError src err
Right typ -> case Type.bindNames mempty (Names3.currentNames names)
$ Type.generalizeLowercase mempty typ of
Left es -> Left $ ParseResolutionFailures src (toList es)
Right typ -> Right typ
makeShadowedPrintNamesFromLabeled
:: Monad m => Set LabeledDependency -> Names0 -> Action' m v Names
makeShadowedPrintNamesFromLabeled deps shadowing =
Names3.shadowing shadowing <$> makePrintNamesFromLabeled' deps
makePrintNamesFromLabeled'
:: Monad m => Set LabeledDependency -> Action' m v Names
makePrintNamesFromLabeled' deps = do
root <- use root
currentPath <- use currentPath
(_missing, rawHistoricalNames) <- eval . Eval $ Branch.findHistoricalRefs
deps
root
basicNames0 <- basicPrettyPrintNames0A
pure $ Names basicNames0 (fixupNamesRelative currentPath rawHistoricalNames)
getTermsIncludingHistorical
:: Monad m => Path.HQSplit -> Branch0 m -> Action' m v (Set Referent)
getTermsIncludingHistorical (p, hq) b = case Set.toList refs of
[] -> case hq of
HQ'.HashQualified n hs -> do
names <- findHistoricalHQs
$ Set.fromList [HQ.HashQualified (Name.unsafeFromText (NameSegment.toText n)) hs]
pure . R.ran $ Names.terms names
_ -> pure Set.empty
_ -> pure refs
where refs = BranchUtil.getTerm (p, hq) b
-- discards inputs that aren't hashqualified;
-- I'd enforce it with finer-grained types if we had them.
findHistoricalHQs :: Monad m => Set (HQ.HashQualified Name) -> Action' m v Names0
findHistoricalHQs lexedHQs0 = do
root <- use root
currentPath <- use currentPath
let
-- omg this nightmare name-to-path parsing code is littered everywhere.
-- We need to refactor so that the absolute-ness of a name isn't represented
-- by magical text combinations.
-- Anyway, this function takes a name, tries to determine whether it is
-- relative or absolute, and tries to return the corresponding name that is
-- /relative/ to the root.
preprocess n = case Name.toString n of
-- some absolute name that isn't just "."
'.' : t@(_:_) -> Name.unsafeFromString t
-- something in current path
_ -> if Path.isRoot currentPath then n
else Name.joinDot (Path.toName . Path.unabsolute $ currentPath) n
lexedHQs = Set.map (fmap preprocess) . Set.filter HQ.hasHash $ lexedHQs0
(_missing, rawHistoricalNames) <- eval . Eval $ Branch.findHistoricalHQs lexedHQs root
pure rawHistoricalNames
basicPrettyPrintNames0A :: Functor m => Action' m v Names0
basicPrettyPrintNames0A = snd <$> basicNames0'
makeShadowedPrintNamesFromHQ :: Monad m => Set (HQ.HashQualified Name) -> Names0 -> Action' m v Names
makeShadowedPrintNamesFromHQ lexedHQs shadowing = do
rawHistoricalNames <- findHistoricalHQs lexedHQs
basicNames0 <- basicPrettyPrintNames0A
currentPath <- use currentPath
-- The basic names go into "current", but are shadowed by "shadowing".
-- They go again into "historical" as a hack that makes them available HQ-ed.
pure $
Names3.shadowing
shadowing
(Names basicNames0 (fixupNamesRelative currentPath rawHistoricalNames))
basicParseNames0, slurpResultNames0 :: Functor m => Action' m v Names0
basicParseNames0 = fst <$> basicNames0'
-- we check the file against everything in the current path
slurpResultNames0 = currentPathNames0
currentPathNames0 :: Functor m => Action' m v Names0
currentPathNames0 = do
currentPath' <- use currentPath
currentBranch' <- getAt currentPath'
pure $ Branch.toNames0 (Branch.head currentBranch')
-- implementation detail of basicParseNames0 and basicPrettyPrintNames0
basicNames0' :: Functor m => Action' m v (Names0, Names0)
basicNames0' = do
root' <- use root
currentPath' <- use currentPath
pure $ Backend.basicNames0' root' (Path.unabsolute currentPath')
data AddRunMainResult v
= NoTermWithThatName
| TermHasBadType (Type v Ann)
| RunMainSuccess (TypecheckedUnisonFile v Ann)
-- Adds a watch expression of the given name to the file, if
-- it would resolve to a TLD in the file. Returns the freshened
-- variable name and the new typechecked file.
--
-- Otherwise, returns `Nothing`.
addWatch
:: (Monad m, Var v)
=> String
-> Maybe (TypecheckedUnisonFile v Ann)
-> Action' m v (Maybe (v, TypecheckedUnisonFile v Ann))
addWatch _watchName Nothing = pure Nothing
addWatch watchName (Just uf) = do
let components = join $ UF.topLevelComponents uf
let mainComponent = filter ((\v -> Var.nameStr v == watchName) . view _1) components
case mainComponent of
[(v, tm, ty)] -> pure . pure $ let
v2 = Var.freshIn (Set.fromList [v]) v
a = ABT.annotation tm
in (v2, UF.typecheckedUnisonFile
(UF.dataDeclarationsId' uf)
(UF.effectDeclarationsId' uf)
(UF.topLevelComponents' uf)
(UF.watchComponents uf <> [(UF.RegularWatch, [(v2, Term.var a v, ty)])]))
_ -> addWatch watchName Nothing
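-- A hedged usage sketch (hypothetical name, not from the original source):
-- given a typechecked file containing a top-level definition `foo`,
-- `addWatch "foo" (Just uf)` yields `Just (foo', uf')`, where `foo'` is a
-- variable freshened against `foo` and `uf'` is `uf` extended with a
-- RegularWatch component `[(foo', <reference to foo>, typeOfFoo)]`; any
-- other name falls through to the `Nothing` case and returns `Nothing`.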
-- Given a typechecked file with a main function called `mainName`
-- of the type `'{IO} ()`, adds an extra binding which
-- forces the `main` function.
--
-- If that function doesn't exist in the typechecked file, the
-- codebase is consulted.
addRunMain
:: (Monad m, Var v)
=> String
-> Maybe (TypecheckedUnisonFile v Ann)
-> Action' m v (AddRunMainResult v)
addRunMain mainName Nothing = do
parseNames0 <- basicParseNames0
let loadTypeOfTerm ref = eval $ LoadTypeOfTerm ref
mainType <- eval RuntimeMain
mainToFile <$>
MainTerm.getMainTerm loadTypeOfTerm parseNames0 mainName mainType
where
mainToFile (MainTerm.NotAFunctionName _) = NoTermWithThatName
mainToFile (MainTerm.NotFound _) = NoTermWithThatName
mainToFile (MainTerm.BadType _ ty) = maybe NoTermWithThatName TermHasBadType ty
mainToFile (MainTerm.Success hq tm typ) = RunMainSuccess $
let v = Var.named (HQ.toText hq) in
UF.typecheckedUnisonFile mempty mempty mempty [("main",[(v, tm, typ)])] -- mempty
addRunMain mainName (Just uf) = do
let components = join $ UF.topLevelComponents uf
let mainComponent = filter ((\v -> Var.nameStr v == mainName) . view _1) components
mainType <- eval RuntimeMain
case mainComponent of
[(v, tm, ty)] -> pure $ let
v2 = Var.freshIn (Set.fromList [v]) v
a = ABT.annotation tm
in
if Typechecker.isSubtype ty mainType then RunMainSuccess $ let
runMain = DD.forceTerm a a (Term.var a v)
in UF.typecheckedUnisonFile
(UF.dataDeclarationsId' uf)
(UF.effectDeclarationsId' uf)
(UF.topLevelComponents' uf)
(UF.watchComponents uf <> [("main", [(v2, runMain, mainType)])])
else TermHasBadType ty
_ -> addRunMain mainName Nothing
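-- A hedged illustration (not from the original source): if the file defines
-- `main : '{IO} ()` (a subtype of the runtime's main type), the file is
-- extended with a "main" watch component binding a fresh variable to the
-- forced `main` term; if the type does not match, the result is
-- `TermHasBadType ty`, and if the name is absent from the file the codebase
-- is consulted instead.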
executePPE
:: (Var v, Monad m)
=> TypecheckedUnisonFile v a
-> Action' m v PPE.PrettyPrintEnv
executePPE unisonFile =
suffixifiedPPE =<< displayNames unisonFile
-- Produce a `Names` needed to display all the hashes used in the given file.
displayNames :: (Var v, Monad m)
=> TypecheckedUnisonFile v a
-> Action' m v Names
displayNames unisonFile =
-- voodoo
makeShadowedPrintNamesFromLabeled
(UF.termSignatureExternalLabeledDependencies unisonFile)
(UF.typecheckedToNames0 unisonFile)
diffHelper :: Monad m
=> Branch0 m
-> Branch0 m
-> Action' m v (PPE.PrettyPrintEnv, OBranchDiff.BranchDiffOutput v Ann)
diffHelper before after = do
hqLength <- eval CodebaseHashLength
diff <- eval . Eval $ BranchDiff.diff0 before after
names0 <- basicPrettyPrintNames0A
ppe <- PPE.suffixifiedPPE <$> prettyPrintEnvDecl (Names names0 mempty)
(ppe,) <$>
OBranchDiff.toOutput
loadTypeOfTerm
declOrBuiltin
hqLength
(Branch.toNames0 before)
(Branch.toNames0 after)
ppe
diff
loadTypeOfTerm :: Referent -> Action m i v (Maybe (Type v Ann))
loadTypeOfTerm (Referent.Ref r) = eval $ LoadTypeOfTerm r
loadTypeOfTerm (Referent.Con (Reference.DerivedId r) cid _) = do
decl <- eval $ LoadType r
case decl of
Just (either DD.toDataDecl id -> dd) -> pure $ DD.typeOfConstructor dd cid
Nothing -> pure Nothing
loadTypeOfTerm Referent.Con{} = error $
reportBug "924628772" "Attempt to load a type declaration which is a builtin!"
declOrBuiltin :: Reference -> Action m i v (Maybe (DD.DeclOrBuiltin v Ann))
declOrBuiltin r = case r of
Reference.Builtin{} ->
pure . fmap DD.Builtin $ Map.lookup r Builtin.builtinConstructorType
Reference.DerivedId id ->
fmap DD.Decl <$> eval (LoadType id)
|
unisonweb/platform
|
parser-typechecker/src/Unison/Codebase/Editor/HandleInput.hs
|
Haskell
|
mit
| 132,775
|
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# OPTIONS_GHC -Wno-orphans #-}
module Instances.Response where
import GHC.Generics
import Test.QuickCheck.Arbitrary.Generic
import Test.QuickCheck.Instances()
import Web.Facebook.Messenger.Types.Responses
import Instances.Request()
import Instances.Static()
deriving instance Generic MessageResponse
instance Arbitrary MessageResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic SenderActionResponse
instance Arbitrary SenderActionResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic SuccessResponse
instance Arbitrary SuccessResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic MessageCreativeResponse
instance Arbitrary MessageCreativeResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic BroadcastMessageResponse
instance Arbitrary BroadcastMessageResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic GetProfileResponse
instance Arbitrary GetProfileResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic ErrorResponse
instance Arbitrary ErrorResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic ErrorDetails
instance Arbitrary ErrorDetails where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic AttachmentUploadResponse
instance Arbitrary AttachmentUploadResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic UserProfileResponse
instance Arbitrary UserProfileResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic MessengerCodeResponse
instance Arbitrary MessengerCodeResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic AccountLinkingResponse
instance Arbitrary AccountLinkingResponse where
arbitrary = genericArbitrary
shrink = genericShrink
-- | Only taking 5 to speed up testing
deriving instance Generic CheckoutUpdateResponse
instance Arbitrary CheckoutUpdateResponse where
arbitrary = CheckoutUpdateResponse <$> fmap (take 5) arbitrary
shrink = genericShrink
deriving instance Generic Shipping
instance Arbitrary Shipping where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic ThreadControlResponse
instance Arbitrary ThreadControlResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic ThreadOwnerResponse
instance Arbitrary ThreadOwnerResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance {-# OVERLAPPABLE #-} Generic (DataResponse a)
instance {-# OVERLAPPABLE #-} (Arbitrary a, Generic a) => Arbitrary (DataResponse a) where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic DomainWhitelistingResponse
instance Arbitrary DomainWhitelistingResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic SecondaryReceiverResponse
instance Arbitrary SecondaryReceiverResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic SecondaryReceiverElement
instance Arbitrary SecondaryReceiverElement where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic TagResponse
instance Arbitrary TagResponse where
arbitrary = genericArbitrary
shrink = genericShrink
deriving instance Generic TagElement
instance Arbitrary TagElement where
arbitrary = genericArbitrary
shrink = genericShrink
|
Vlix/facebookmessenger
|
test/Instances/Response.hs
|
Haskell
|
mit
| 3,784
|
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE UndecidableInstances #-}
module WeiXin.PublicPlatform.Conversation.Yesod where
-- {{{1 imports
import ClassyPrelude.Yesod hiding (Proxy, proxy)
import qualified Control.Exception.Safe as ExcSafe
import Control.Monad.Logger
import Data.Proxy
import qualified Data.ByteString.Lazy as LB
import qualified Data.Text as T
import qualified Data.Aeson as A
import qualified Data.Aeson.Types as A
import qualified Data.Conduit.List as CL
import Control.Monad.Except hiding (forM_)
import Control.Monad.Trans.Maybe
import Data.Time (NominalDiffTime, addUTCTime)
import Data.List.NonEmpty (NonEmpty(..))
import WeiXin.PublicPlatform.Conversation
import WeiXin.PublicPlatform.Class
import WeiXin.PublicPlatform.Yesod.Model
import WeiXin.PublicPlatform.InMsgHandler
import WeiXin.PublicPlatform.WS
import WeiXin.PublicPlatform.Utils
import WeiXin.PublicPlatform.Media
-- }}}1
saveWxppTalkState :: forall m a r. ( MonadLoggerIO m, ToJSON a, WxTalkerState r m a)
=> WxppDbRunner
-> (a -> Text)
-> WxppTalkStateId
-> a
-> WxTalkerMonad r m ()
saveWxppTalkState db_runner get_state_type state_id x = mkWxTalkerMonad $ \env -> runExceptT $ do
now <- liftIO getCurrentTime
done <- ExceptT $ flip runWxTalkerMonad env $ wxTalkIfDone x
log_func <- askLoggerIO
liftIO $ flip runLoggingT log_func $ runWxppDB db_runner $ do
update state_id
[ WxppTalkStateTyp =. get_state_type x
, WxppTalkStateJson =. (LB.toStrict $ A.encode x)
, WxppTalkStateDone =. done
, WxppTalkStateUpdatedTime =. now
]
abortCurrentWxppTalkState :: forall m r. (MonadLoggerIO m)
=> WxppDbRunner
-> r
-> (Text -> Maybe (WxppTalkerAbortStateEntry r m))
-- ^ lookup WxppTalkerAbortStateEntry by state's type string
-> WxTalkAbortInitiator
-> WxppAppID
-> WxppOpenID
-> m (Maybe [WxppOutMsg])
-- {{{1
abortCurrentWxppTalkState db_runner common_env lookup_se initiator app_id open_id = do
log_func <- askLoggerIO
m_rec <- liftIO $ flip runLoggingT log_func $ runWxppDB db_runner $ loadWxppTalkStateCurrent app_id open_id
fmap join $ forM m_rec $ \ e_rec@(Entity rec_id rec) -> do
if not $ wxppTalkStateAborted rec || wxppTalkStateDone rec
then fmap Just $ do
out_msgs <- wxppExecTalkAbortForRecord common_env lookup_se initiator e_rec
liftIO $ flip runLoggingT log_func $ runWxppDB db_runner $ update rec_id [ WxppTalkStateAborted =. True ]
return out_msgs
else return Nothing
-- }}}1
wxppExecTalkAbortForRecord :: (MonadLogger m)
=> r
-> (Text -> Maybe (WxppTalkerAbortStateEntry r m))
-- ^ lookup WxppTalkerAbortStateEntry by state's type string
-> WxTalkAbortInitiator
-> Entity WxppTalkState
-> m [WxppOutMsg]
wxppExecTalkAbortForRecord common_env lookup_se initiator e_rec@(Entity rec_id rec) = do
-- {{{1
case lookup_se typ_str of
Nothing -> do
$logWarnS wxppLogSource $ "could not find state entry for record #" <> toPathPiece rec_id
<> ", type string was: " <> typ_str
return []
Just (WxppTalkerAbortStateEntry sp ext_env) -> do
err_or_st <- parseWxppTalkStateFromRecord sp e_rec
case err_or_st of
Left err -> do
$logErrorS wxppLogSource $ "parseWxppTalkStateFromRecord failed for record #" <> toPathPiece rec_id
<> ": " <> fromString err
return []
Right Nothing -> return []
Right (Just st) -> do
err_or_outmsgs <- flip runWxTalkerMonad (common_env, ext_env) $ wxTalkAbort st initiator
case err_or_outmsgs of
Left err -> do
$logErrorS wxppLogSource $ "wxTalkAbort failed for record #" <> toPathPiece rec_id
<> ": " <> fromString err
return []
Right x -> return x
where typ_str = wxppTalkStateTyp rec
-- }}}1
newWxppTalkState :: forall m a.
( MonadIO m, ToJSON a) =>
(a -> Text)
-> WxppAppID
-> WxppOpenID
-> a
-> ReaderT SqlBackend m WxppTalkStateId
newWxppTalkState get_state_type app_id open_id x = do
now <- liftIO getCurrentTime
insert $ WxppTalkState
app_id
open_id
(get_state_type x)
(LB.toStrict $ A.encode x)
False
False
now
now
newWxppTalkState' :: forall m a.
( MonadIO m, HasStateType a, ToJSON a ) =>
WxppAppID
-> WxppOpenID
-> a
-> ReaderT SqlBackend m WxppTalkStateId
newWxppTalkState' = newWxppTalkState getStateType'
saveAnyWxppTalkState :: forall m r a. (MonadLoggerIO m, ToJSON a, HasStateType a, WxTalkerState r m a)
=> WxppDbRunner
-> WxppTalkStateId
-> a
-> WxTalkerMonad r m ()
saveAnyWxppTalkState db_runner = saveWxppTalkState db_runner getStateType'
loadAnyWxppTalkState :: forall m a. (MonadLoggerIO m, FromJSON a, HasStateType a)
=> WxppDbRunner
-> Proxy a
-> WxppTalkStateId
-> m (Either String (Maybe a))
loadAnyWxppTalkState db_runner proxy state_id = runExceptT $ runMaybeT $ do
log_func <- askLoggerIO
rec <- MaybeT $ liftIO $ flip runLoggingT log_func $ runWxppDB db_runner $ get state_id
MaybeT $ ExceptT $ parseWxppTalkStateFromRecord proxy $ Entity state_id rec
parseWxppTalkStateFromRecord :: forall m a.
( MonadLogger m, HasStateType a, FromJSON a ) =>
Proxy a
-> Entity WxppTalkState
-> m (Either String (Maybe a))
parseWxppTalkStateFromRecord proxy (Entity rec_id rec) = runExceptT $ runMaybeT $ do
when ( wxppTalkStateAborted rec ) mzero
let state_type = wxppTalkStateTyp rec
when (state_type /= getStateType proxy) mzero
case A.eitherDecodeStrict (wxppTalkStateJson rec) of
Left err -> do
let err_msg = "cannot decode JSON ByteString: WxppTalkState #"
<> toPathPiece rec_id
<> ", " <> fromString err
$logErrorS wxppLogSource err_msg
throwError $ T.unpack err_msg
Right jv -> do
case A.parseEither parseJSON jv of
Left jerr -> do
let err_msg = "cannot decode JSON Value: WxppTalkState #"
<> toPathPiece rec_id
<> ", " <> fromString jerr
$logErrorS wxppLogSource err_msg
throwError $ T.unpack err_msg
Right x -> return x
{-
saveSomeWxppTalkState :: forall m r.
( MonadIO m ) =>
WxppTalkStateId
-> SomeWxppTalkState r (ReaderT SqlBackend m)
-> WxTalkerMonad r (ReaderT SqlBackend m) ()
saveSomeWxppTalkState = saveWxppTalkState getStateTypeOfSomeWxppTalkState
--}
loadWxppTalkStateCurrent :: forall m. (MonadIO m)
=> WxppAppID
-> WxppOpenID
-> ReaderT WxppDbBackend m (Maybe (Entity WxppTalkState))
loadWxppTalkStateCurrent app_id open_id = do
selectFirst [ WxppTalkStateOpenId ==. open_id
, WxppTalkStateAppId ==. app_id
]
[ Desc WxppTalkStateId ]
-- | used with loopRunBgJob
cleanUpTimedOutWxTalk :: (MonadLoggerIO m, HasWxppAppID r)
=> WxppDbRunner
-> r
-> [WxppTalkerAbortStateEntry r m]
-- ^ lookup WxppTalkerAbortStateEntry by state's type string
-> (NominalDiffTime, NominalDiffTime)
-> (WxppAppID -> WxppOpenID -> [WxppOutMsg] -> m ())
-> m ()
cleanUpTimedOutWxTalk db_runner common_env entries ttls on_abort_talk = do
-- {{{1
let timeout_ttl = uncurry min ttls
chk_ttl = uncurry max ttls
now <- liftIO getCurrentTime
let dt = addUTCTime (negate $ abs timeout_ttl) now
  -- Keep the SQL below from searching over too large a set
let too_old = flip addUTCTime now $ negate $ chk_ttl
log_func <- askLoggerIO
m_new_records <- liftIO $ runResourceT $ flip runLoggingT log_func $ runWxppDB db_runner $ do
infos <- runConduit $
selectSource
[ WxppTalkStateDone ==. False
, WxppTalkStateAborted ==. False
, WxppTalkStateAppId ==. app_id
, WxppTalkStateUpdatedTime <. dt
, WxppTalkStateUpdatedTime >. too_old
]
[]
.| CL.map (id &&& (wxppTalkStateOpenId . entityVal))
.| CL.consume
    -- Update all matched records with a single SQL statement
updateWhere
[ WxppTalkStateId <-. map (entityKey . fst) infos ]
[ WxppTalkStateAborted =. True ]
    -- Then notify the affected users
forM infos $ \(e_rec@(Entity rec_id _), open_id) -> do
m_new <- selectFirst
[ WxppTalkStateAppId ==. app_id
, WxppTalkStateOpenId ==. open_id
, WxppTalkStateId >. rec_id
]
[]
return (e_rec, open_id, m_new)
forM_ m_new_records $ \ (e_rec, open_id, m_new) -> do
    -- Only notify the user if no newer conversation has been established for them since
when (isNothing m_new) $ do
out_msgs <- wxppExecTalkAbortForRecord common_env lookup_se WxTalkAbortBySys e_rec
on_abort_talk app_id open_id out_msgs
where
app_id = getWxppAppID common_env
match_entry typ_str (WxppTalkerAbortStateEntry p _) = getStateType p == typ_str
lookup_se = \ x -> find (match_entry x) entries
-- }}}1
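-- A hedged illustration of the `ttls` pair (values hypothetical, not from the
-- original source): with ttls = (300, 3600), conversations idle for more than
-- 5 minutes (the smaller value) are aborted, while only records updated within
-- the last hour (the larger value) are scanned, keeping the query bounded.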
-- | Provided only to reduce code duplication:
-- this object can be used to construct the less demanding WxppTalkerStateEntry, WxppTalkerFreshStateEntry and WxppTalkerAbortStateEntry
data WxppTalkerFullStateEntry r0 m = forall s r.
(Eq s, ToJSON s, FromJSON s, HasStateType s
, WxTalkerState (r0, r) m s
, WxTalkerDoneAction (r0, r) m s
, WxTalkerAbortAction (r0, r) m s
, WxTalkerFreshState (r0, r) m s
)
=> WxppTalkerFullStateEntry (Proxy s) r
-- | Used as an argument to WxppTalkHandlerGeneral below.
-- r0 is the global environment supplied by WxppTalkHandlerGeneral.
data WxppTalkerStateEntry r0 m = forall s r.
(Eq s, ToJSON s, FromJSON s, HasStateType s
, WxTalkerState (r0, r) m s
, WxTalkerDoneAction (r0, r) m s
) =>
WxppTalkerStateEntry (Proxy s) r
wxppTalkerStateEntryFromFull :: WxppTalkerFullStateEntry r m -> WxppTalkerStateEntry r m
wxppTalkerStateEntryFromFull (WxppTalkerFullStateEntry p x) = WxppTalkerStateEntry p x
-- | The general-purpose conversation handler.
-- Every incoming message should pass through this handler once:
-- if a conversation is in progress, the input is handled accordingly;
-- if not, this is effectively a no-op.
data WxppTalkHandlerGeneral r m = WxppTalkHandlerGeneral
{ wxppTalkDbRunner :: WxppDbRunner -- ^ to run db functions
, wxppTalkDbReadOnlyEnv :: r -- ^ read only data/environment
, wxppTalkDStateEntry :: [WxppTalkerStateEntry r m ]
}
instance JsonConfigable (WxppTalkHandlerGeneral r m) where
type JsonConfigableUnconfigData (WxppTalkHandlerGeneral r m) =
(WxppDbRunner, r, [WxppTalkerStateEntry r m])
isNameOfInMsgHandler _ x = x == "any-talk"
parseWithExtraData _ (f1, f2, f3) _obj = return $ WxppTalkHandlerGeneral f1 f2 f3
type instance WxppInMsgProcessResult (WxppTalkHandlerGeneral r m) = WxppInMsgHandlerResult
instance (WxppApiMonad env m, MonadLoggerIO m) =>
IsWxppInMsgProcessor m (WxppTalkHandlerGeneral r m)
where
processInMsg (WxppTalkHandlerGeneral db_runner env entries) _cache app_info _bs ime = do
log_func <- askLoggerIO
runExceptT $ do
m_state_rec <- mapExceptT (liftIO . flip runLoggingT log_func . runWxppDB db_runner) $ do
lift $ loadWxppTalkStateCurrent app_id open_id
case m_state_rec of
Nothing -> return []
Just e_state_rec@(Entity state_id _) -> do
let mk :: WxppTalkerStateEntry r m -> MaybeT (ExceptT String m) WxppInMsgHandlerResult
mk (WxppTalkerStateEntry state_proxy rx) = MaybeT $ ExceptT $
processInMsgByWxTalk
db_runner
state_proxy
(env, rx)
e_state_rec
ime
m_result <- runMaybeT $ asum $ map mk entries
case m_result of
Nothing -> do
$logWarnS wxppLogSource $ "no handler could handle talk state: state_id="
<> toPathPiece state_id
return []
Just x -> return x
{-
m_state_info <- ExceptT $ loadWxppTalkStateCurrent open_id
case m_state_info of
Nothing -> return []
Just (db_id, _ :: SomeWxppTalkState CommonTalkEnv (ReaderT SqlBackend m) ) -> do
ExceptT $ processInMsgByWxTalk
(mk_env $ wxppInFromUserName ime)
db_id
ime
--}
where
app_id = procAppIdInfoReceiverId app_info
open_id = wxppInFromUserName ime
-- | Message handler: starts a new conversation when invoked.
-- The conversation state is given by the type parameter s.
-- It carries no condition of its own, so it is usually combined with a predicate handler,
-- although the condition check can also be implemented inside wxTalkInitiate.
data WxppTalkInitiator r s = WxppTalkInitiator
{ wxppTalkInitDbRunner :: WxppDbRunner
  , wxppTalkInitEnv :: r -- ^ environment value independent of the conversation kind
  , wxppTalkInitStateEnv :: (WxppTalkStateExtraEnv s) -- ^ environment value specific to this conversation
}
instance HasStateType s => JsonConfigable (WxppTalkInitiator r s) where
type JsonConfigableUnconfigData (WxppTalkInitiator r s) =
(WxppDbRunner, r, WxppTalkStateExtraEnv s)
isNameOfInMsgHandler _ x =
x == "initiate-talk:" <> getStateType (Proxy :: Proxy s)
parseWithExtraData _ (f1, f2, f3) _obj = return $ WxppTalkInitiator f1 f2 f3
type instance WxppInMsgProcessResult (WxppTalkInitiator r s) = WxppInMsgHandlerResult
instance
( HasStateType s, ToJSON s, FromJSON s, Eq s
, HasWxppAppID r
, WxTalkerDoneAction (r, r2) m s
, WxTalkerState (r, r2) m s
, WxTalkerFreshState (r, r2) m s
, r2 ~ WxppTalkStateExtraEnv s
, MonadLoggerIO m
) =>
IsWxppInMsgProcessor m (WxppTalkInitiator r s)
where
processInMsg (WxppTalkInitiator db_runner env extra_env) _cache _app_info _bs ime =
runExceptT $ do
let from_open_id = wxppInFromUserName ime
app_id = getWxppAppID env
log_func <- askLoggerIO
msgs_or_state <- flip runWxTalkerMonadE (env, extra_env) $ wxTalkInitiate ime
case msgs_or_state of
Left msgs -> do
-- cannot create conversation
return $ map ((False,) . Just) $ msgs
Right (state :: s) -> do
-- state_id <- lift $ newWxppTalkState' app_id from_open_id state
e_state <- liftIO . flip runLoggingT log_func . runWxppDB db_runner $ do
newWxppTalkState' app_id from_open_id state
ExceptT $ processJustInitedWxTalk db_runner
(Proxy :: Proxy s) (env, extra_env) e_state
-- | Differs from WxppTalkerStateEntry only in the extra WxTalkerFreshState requirement
data WxppTalkerFreshStateEntry r0 m = forall s r.
(Eq s, ToJSON s, FromJSON s, HasStateType s
, WxTalkerState (r0, r) m s
, WxTalkerDoneAction (r0, r) m s
, WxTalkerFreshState (r0, r) m s
) =>
WxppTalkerFreshStateEntry (Proxy s) r
wxppTalkerFreshStateEntryFromFull :: WxppTalkerFullStateEntry r m -> WxppTalkerFreshStateEntry r m
wxppTalkerFreshStateEntryFromFull (WxppTalkerFullStateEntry p x) = WxppTalkerFreshStateEntry p x
wxppTalkerFreshStateEntryToStateEntry :: WxppTalkerFreshStateEntry r m -> WxppTalkerStateEntry r m
wxppTalkerFreshStateEntryToStateEntry (WxppTalkerFreshStateEntry p x) = WxppTalkerStateEntry p x
-- | Message handler: starts a new conversation when invoked.
-- The event key it accepts must have the form: initiate-talk:XXX
-- where XXX is the getStateType of some conversation state.
-- Similar to WxppTalkInitiator, except that:
-- * WxppTalkInitiator can only initiate one fixed kind of conversation,
--   while WxppTalkEvtKeyInitiator picks one from a set of candidates
-- * WxppTalkInitiator carries no condition of its own,
--   while WxppTalkEvtKeyInitiator chooses the appropriate conversation type based on the event key
data WxppTalkEvtKeyInitiator r m = WxppTalkEvtKeyInitiator
{ wxppTalkEvtKeyInitDbRunner :: WxppDbRunner
  , wxppTalkEvtKeyInitEventEnv :: r -- ^ environment value independent of the conversation kind
, wxppTalkEvtKeyInitStateEntry :: [WxppTalkerFreshStateEntry r m]
}
instance JsonConfigable (WxppTalkEvtKeyInitiator r m) where
type JsonConfigableUnconfigData (WxppTalkEvtKeyInitiator r m) =
(WxppDbRunner, r, [WxppTalkerFreshStateEntry r m])
isNameOfInMsgHandler _ x = x == "evtkey-initiate-talk"
parseWithExtraData _ (f1, f2, f3) _obj = return $ WxppTalkEvtKeyInitiator f1 f2 f3
type instance WxppInMsgProcessResult (WxppTalkEvtKeyInitiator r m) = WxppInMsgHandlerResult
instance
( HasWxppAppID r
, MonadIO m, MonadLoggerIO m
) =>
IsWxppInMsgProcessor m (WxppTalkEvtKeyInitiator r m)
where
processInMsg (WxppTalkEvtKeyInitiator db_runner env entries) _cache _app_info _bs ime =
runExceptT $ do
case wxppInMessage ime of
WxppInMsgEvent (WxppEvtClickItem evtkey) -> do
case T.stripPrefix "initiate-talk:" evtkey of
Nothing -> return []
Just st_type -> do_work st_type
_ -> return []
where
do_work st_type = do
let match_st_type (WxppTalkerFreshStateEntry px _) = getStateType px == st_type
log_func <- askLoggerIO
case find match_st_type entries of
Nothing -> do
$logWarnS wxppLogSource $
"Failed to initiate talk from menu click,"
<> " because talk state type is unknown to me: "
<> st_type
return []
Just (WxppTalkerFreshStateEntry st_px extra_env) -> do
let from_open_id = wxppInFromUserName ime
app_id = getWxppAppID env
msgs_or_state <- flip runWxTalkerMonadE (env, extra_env) $
wxTalkInitiateBlank st_px from_open_id
case msgs_or_state of
Left msgs -> do
-- cannot create conversation
$logErrorS wxppLogSource $
"Couldn't create talk, providing error output messages: " <> tshow msgs
return $ map ((False,) . Just) $ msgs
Right state -> do
-- state_id <- lift $ newWxppTalkState' app_id from_open_id state
e_state <- liftIO $ flip runLoggingT log_func $ runWxppDB db_runner $
newWxppTalkState' app_id from_open_id state
ExceptT $ processJustInitedWxTalk db_runner st_px (env, extra_env) e_state
-- | Like WxppTalkerStateEntry, except that it only requires WxTalkerAbortAction of the state
data WxppTalkerAbortStateEntry r0 m = forall s r.
(Eq s, ToJSON s, FromJSON s, HasStateType s
, WxTalkerAbortAction (r0, r) m s
)
=> WxppTalkerAbortStateEntry (Proxy s) r
wxppTalkerAbortStateEntryFromFull :: WxppTalkerFullStateEntry r m -> WxppTalkerAbortStateEntry r m
wxppTalkerAbortStateEntryFromFull (WxppTalkerFullStateEntry p x) = WxppTalkerAbortStateEntry p x
-- | Message handler: unconditionally terminates the current conversation when invoked
data WxppTalkTerminator r m = WxppTalkTerminator
{ wxppTalkTermDir :: (NonEmpty FilePath) -- ^ out-msg dir path
, wxppTalkTermDbRunner :: WxppDbRunner
, wxppTalkTermCommonEnv :: r -- ^ read only data/environment
, wxppTalkTermStateEntries :: [ WxppTalkerAbortStateEntry r m ]
-- , wxppTalkTermPrimary :: Bool -- ^ if primary
  -- Once this handler has been invoked, the database has most likely already been modified,
  -- so whether it is "primary" no longer matters much; it should always be treated as a primary response.
  , wxppTalkTermOurMsg :: WxppOutMsgLoader -- ^ the message we intend to send back to the user
}
instance JsonConfigable (WxppTalkTerminator r m) where
type JsonConfigableUnconfigData (WxppTalkTerminator r m) = (NonEmpty FilePath, WxppDbRunner, r, [ WxppTalkerAbortStateEntry r m ])
isNameOfInMsgHandler _ x = x == "terminate-talk"
parseWithExtraData _ (f1, f2, f3, f4) obj =
WxppTalkTerminator f1 f2 f3 f4
-- <$> (obj .:? "primary" .!= True)
<$> parseWxppOutMsgLoader obj
type instance WxppInMsgProcessResult (WxppTalkTerminator r m) = WxppInMsgHandlerResult
instance (WxppApiMonad env m, MonadLoggerIO m, ExcSafe.MonadCatch m) =>
IsWxppInMsgProcessor m (WxppTalkTerminator r m) where
processInMsg (WxppTalkTerminator msg_dirs db_runner common_env entries get_outmsg) cache app_info _bs ime =
-- {{{1
runExceptT $ do
let from_open_id = wxppInFromUserName ime
m_out_msgs_abort <-
lift $ abortCurrentWxppTalkState db_runner common_env (\ x -> find (match_entry x) entries) WxTalkAbortByUser app_id from_open_id
liftM (fromMaybe []) $ forM m_out_msgs_abort $ \ out_msgs_abort -> do
let get_atk = (tryWxppWsResultE "getting access token" $ liftIO $
wxppCacheGetAccessToken cache app_id)
>>= maybe (throwError $ "no access token available") (return . fst)
outmsg_l <- ExceptT $ runDelayedYamlLoaderL msg_dirs get_outmsg
out_msg <- tryWxppWsResultE "fromWxppOutMsgL" $
tryYamlExcE $ fromWxppOutMsgL msg_dirs cache get_atk outmsg_l
return $ map ((primary,) . Just) $ out_msgs_abort <> [ out_msg ]
where
primary = True
app_id = procAppIdInfoReceiverId app_info
match_entry typ_str (WxppTalkerAbortStateEntry p _) = getStateType p == typ_str
-- }}}1
processInMsgByWxTalk :: (HasStateType s, Eq s, FromJSON s, ToJSON s
, MonadLoggerIO m
-- , WxppApiMonad env m
, WxTalkerState r m s
, WxTalkerDoneAction r m s
)
=> WxppDbRunner
-> Proxy s
-> r
-> Entity WxppTalkState
-> WxppInMsgEntity
-> m (Either String (Maybe WxppInMsgHandlerResult))
processInMsgByWxTalk db_runner state_proxy env (Entity state_id state_rec) ime = do
let state_type = wxppTalkStateTyp state_rec
if (state_type /= getStateType state_proxy)
then return $ Right Nothing
else do
      -- Handling the conversation does not necessarily update the state,
      -- so bump the updated time to show that this conversation is not idle.
now <- liftIO getCurrentTime
log_func <- askLoggerIO
liftIO $ flip runLoggingT log_func $ runWxppDB db_runner $
update state_id [ WxppTalkStateUpdatedTime =. now ]
liftM (fmap Just) $
wxTalkerInputProcessInMsg
get_st set_st
env
(Just ime)
where
set_st _open_id = saveAnyWxppTalkState db_runner state_id
get_st _open_id = mkWxTalkerMonad $ \_ -> loadAnyWxppTalkState db_runner state_proxy state_id
processJustInitedWxTalk :: ( MonadLoggerIO m
, Eq s, FromJSON s, ToJSON s, HasStateType s
, WxTalkerDoneAction r m s
, WxTalkerState r m s
)
=> WxppDbRunner
-> Proxy s
-> r
-> WxppTalkStateId
-> m (Either String WxppInMsgHandlerResult)
processJustInitedWxTalk db_runner state_proxy env state_id = runExceptT $ do
ExceptT $ wxTalkerInputProcessJustInited get_st set_st env
where set_st = saveAnyWxppTalkState db_runner state_id
get_st = mkWxTalkerMonad $ \_ -> loadAnyWxppTalkState db_runner state_proxy state_id
-- vim: set foldmethod=marker:
|
yoo-e/weixin-mp-sdk
|
WeiXin/PublicPlatform/Conversation/Yesod.hs
|
Haskell
|
mit
| 26,066
|
module Exercise where
perfect :: Int -> Bool
perfect n = sum (delers n) == n
delers :: Int -> [Int]
delers n = [x | x <- [1..n-1], n `mod` x == 0]
perfectTill :: Int -> [Int]
perfectTill n = filter perfect [1..n]
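-- Illustrative examples (added for clarity, not in the original file):
--   delers 6       == [1,2,3]
--   perfect 6      == True            -- 1 + 2 + 3 == 6
--   perfectTill 30 == [6,28]          -- 28 == 1 + 2 + 4 + 7 + 14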
|
tcoenraad/functioneel-programmeren
|
2012/opg1a.hs
|
Haskell
|
mit
| 217
|
module Graphics.UI.Gtk.WebKit.WebView.Concrete (
titleChanged,
resourceRequestStarting
) where
import Graphics.UI.Gtk.WebKit.NetworkRequest (NetworkRequest)
import Graphics.UI.Gtk.WebKit.NetworkResponse (NetworkResponse)
import Graphics.UI.Gtk.WebKit.WebFrame (WebFrame)
import Graphics.UI.Gtk.WebKit.WebResource (WebResource)
import Graphics.UI.Gtk.WebKit.WebView (WebView)
import qualified Graphics.UI.Gtk.WebKit.WebView as Signal (titleChanged, resourceRequestStarting)
import System.Glib.Signals (Signal)
titleChanged :: Signal WebView (WebFrame -> String -> IO ())
titleChanged = Signal.titleChanged
resourceRequestStarting :: Signal WebView (WebFrame -> WebResource -> Maybe NetworkRequest -> Maybe NetworkResponse -> IO ())
resourceRequestStarting = Signal.resourceRequestStarting
|
fmap/hwb
|
src/Graphics/UI/Gtk/WebKit/WebView/Concrete.hs
|
Haskell
|
mit
| 795
|
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TypeApplications #-}
module Examples.Rpc.EchoServer (main) where
import Network.Simple.TCP (serve)
import Capnp.New (SomeServer, def, defaultLimit, export, handleParsed)
import Capnp.Rpc (ConnConfig(..), handleConn, socketTransport, toClient)
import Capnp.Gen.Echo.New
data MyEchoServer = MyEchoServer
instance SomeServer MyEchoServer
instance Echo'server_ MyEchoServer where
echo'echo MyEchoServer = handleParsed $ \params ->
pure def { reply = query params }
main :: IO ()
main = serve "localhost" "4000" $ \(sock, _addr) ->
handleConn (socketTransport sock defaultLimit) def
{ debugMode = True
, getBootstrap = \sup -> Just . toClient <$> export @Echo sup MyEchoServer
}
|
zenhack/haskell-capnp
|
examples/lib/Examples/Rpc/EchoServer.hs
|
Haskell
|
mit
| 817
|
{-# LANGUAGE OverloadedStrings #-}
module Main where
import Control.Concurrent (threadDelay)
import Control.Monad (forever)
import Control.Monad.IO.Class (liftIO)
import Data.Text (Text)
import System.Environment (getArgs)
import System.Random (randomRIO)
import HBar
main :: IO ()
main = do
selection <- head <$> getArgs
case selection of
"fruit" -> runFruitPopulation
"gop" -> runGopPrimarySimulation
_ -> print ("fruit or gop" :: String)
runFruitPopulation :: IO ()
runFruitPopulation =
runHBar "http://localhost:8888" "Fruit popularity by country"
fruitPopulation
fruitPopulation :: HBar ()
fruitPopulation = do
addItemWithCategory "Apple" "Sweden" 88
addItemWithCategory "Orange" "Sweden" 110
addItemWithCategory "Banana" "Sweden" 23
addItemWithCategory "Apple" "Norway" 67
addItemWithCategory "Orange" "Norway" 15
addItemWithCategory "Banana" "Norway" 90
addItemWithCategory "Pineapple" "Denmark" 45
addItemWithCategory "Apple" "Denmark" 19
addItemWithCategory "Orange" "Denmark" 46
addItemWithCategory "Banana" "Denmark" 8
commit
runGopPrimarySimulation :: IO ()
runGopPrimarySimulation =
runHBar "http://localhost:8888" "GOP primary poll's simulation"
gopPrimarySimulation
gopPrimarySimulation :: HBar ()
gopPrimarySimulation =
forever $ do
state <- selectFrom states
candidate <- selectFrom candidates
vote <- fromIntegral <$> selectFrom votes
addItemWithCategory candidate state vote
commit
wait
selectFrom :: [a] -> HBar a
selectFrom xs = do
ind <- liftIO $ randomRIO (0, length xs - 1)
return $ xs !! ind
states :: [Text]
states = [ "Arizona", "Ohio", "Oregon", "Florida", "Texas"
, "Nevada", "North Carolina", "Washington" ]
candidates :: [Text]
candidates = [ "Trump", "Cruz", "Kasich", "Fiorina"
, "Rubio", "Bush", "Carson" ]
votes :: [Int]
votes = [1..25]
wait :: HBar ()
wait = liftIO $ threadDelay 1000000
|
kosmoskatten/plotly-hs
|
plotly-hbar-demo/src/Main.hs
|
Haskell
|
mit
| 1,971
|