| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–101) | path (stringlengths 4–991) | language (stringclasses, 3 values) | license (stringclasses, 5 values) | size (int64, 2–1.05M) |
|---|---|---|---|---|---|
module FireHazard
(grid,
instructions,
lightsLit)
where
import qualified Data.ByteString.Lazy.Char8 as C
import Data.Vector as V (replicate,accum, Vector, foldr, sum)
data Switch = ON | OFF | Toggle deriving (Eq, Show)
type Grid = V.Vector (V.Vector Int)
rv :: V.Vector Int
rv = V.replicate 1000 0
grid :: Grid
grid = V.replicate 1000 rv
lightsLit :: Grid -> Int
lightsLit g = V.foldr (\e a -> (V.sum e) + a) 0 g
updateCol :: Int -> Switch -> Int
updateCol e v
| v == Toggle && e == 0 = 1
| v == Toggle && e == 1 = 0
| v == ON && e == 0 = 1
| v == OFF && e == 1 = 0
| otherwise = e
updateRow :: V.Vector Int -> [(Int,Switch)] -> V.Vector Int
updateRow v cu = V.accum updateCol v cu
updateGrid :: Grid -> [(Int,[(Int,Switch)])] -> Grid
updateGrid g ru = V.accum updateRow g ru
switchInst :: [C.ByteString] -> Switch
switchInst bs
| (head bs) == C.pack "turn" && (head $ tail bs) == C.pack "on" = ON
| (head bs) == C.pack "toggle" = Toggle
| otherwise = OFF
bsToGrid :: C.ByteString -> Int
bsToGrid bs = case C.readInt bs of
Nothing -> 0
Just (n, _) -> n
startEndInst :: [C.ByteString] -> ([Int], [Int])
startEndInst xs
| head xs == C.pack "turn" =
(map bsToGrid (C.split ',' (xs !! 2)),
map bsToGrid (C.split ',' (xs !! 4)))
| otherwise = ( map bsToGrid (C.split ',' (xs !! 1)),
map bsToGrid (C.split ',' (xs !! 3)))
instructions :: C.ByteString -> Grid -> Grid
instructions bs g = updateGrid g r
where s = C.split ' ' bs
si = switchInst s
((cs:rs:_), (ce:re:_)) = startEndInst s
c = [(x, si) | x <- [cs..ce]]
r = [(y, c) | y <- [rs..re]]
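-- A hedged usage sketch (not part of the original module): apply a single
-- instruction to the all-off grid and count the lit lights. For
-- "turn on 0,0 through 999,999" every light is switched on, so the result
-- should be 1,000,000.
exampleLit :: Int
exampleLit = lightsLit (instructions (C.pack "turn on 0,0 through 999,999") grid)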
| cvsekhar/adventofcode | src/FireHazard.hs | Haskell | bsd-3-clause | 1,901 |
{-# LANGUAGE OverloadedStrings #-}
module Text.Authoring.Combinator.Meta where
import Control.Monad.Writer
import Data.Text (Text)
import Text.Authoring.Document
import Text.Authoring.Combinator.Writer
-- | wrap the argument @x@ using curly braces "{x}"
braces :: (MonadWriter t m, HasDocument t) => m () -> m ()
braces con = do
raw "{"
con
raw "}"
-- | command0 x = "\x{}"
command0 :: (MonadWriter t m, HasDocument t) => Text -> m ()
command0 x = do
raw "\\"
raw x
raw "{}"
-- | command1 x con = "\x{con}"
command1 :: (MonadWriter t m, HasDocument t) => Text -> m () -> m ()
command1 x con = do
raw "\\"
raw x
raw "{"
con
raw "}"
-- | commandI x con = "{\x con}"
commandI :: (MonadWriter t m, HasDocument t) => Text -> m () -> m ()
commandI x con = do
raw "{\\"
raw x
raw " "
con
raw "}"
-- | environment x con = "\begin{x}con\end{x}"
environment :: (MonadWriter t m, HasDocument t) => Text -> m () -> m ()
environment x con = do
raw "\\begin{"
raw x
raw "}"
con
raw "\\end{"
raw x
raw "}"
| nushio3/authoring | src/Text/Authoring/Combinator/Meta.hs | Haskell | bsd-3-clause | 1,090 |
lend amount balance = let reserve = 100
newBalance = balance - amount
in if balance < reserve
then Nothing
else Just newBalance
lend2 amount balance = if amount < reserve * 0.5
then Just newBalance
else Nothing
where reserve = 100
newBalance = balance - amount
lend3 amount balance
| amount <= 0 = Nothing
| amount > reserve * 0.5 = Nothing
| otherwise = Just newBalance
where reserve = 100
newBalance = balance - amount
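-- A hedged usage note (not part of the original file); expected GHCi results:
--   lend  25 100  == Just 75
--   lend  25  99  == Nothing    -- balance is below the reserve of 100
--   lend2 40 100  == Just 60    -- 40 < reserve * 0.5
--   lend2 60 100  == Nothing
--   lend3  0 100  == Nothing    -- non-positive amounts are rejected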
| NeonGraal/rwh-stack | ch03/Lending.hs | Haskell | bsd-3-clause | 656 |
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
module Duckling.Quantity.EN.Tests
( tests
) where
import Data.String
import Prelude
import Test.Tasty
import Duckling.Dimensions.Types
import Duckling.Quantity.EN.Corpus
import Duckling.Testing.Asserts
tests :: TestTree
tests = testGroup "EN Tests"
[ makeCorpusTest [Seal Quantity] corpus
, makeCorpusTest [Seal Quantity] latentCorpus
]
| facebookincubator/duckling | tests/Duckling/Quantity/EN/Tests.hs | Haskell | bsd-3-clause | 557 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE DeriveGeneric #-}
module Main where
import Arch
import MainCommon
import Data.String.Conv (toS)
import Data.Aeson (encode)
main :: IO ()
main = do
doc <- getDocumentByArgs
let packageStats = PackagesStats
<$> f "core"
<*> f "extra"
<*> f "community"
<*> f "multilib"
<*> f "unknown"
where f x = extractRights ( parseArchDoc x $ doc )
case packageStats of
Right pkgs -> do
writeFile "packageStatistics.json" . toS $ encode pkgs
print ("Success" :: String)
Left l -> putStrLn l
| chrissound/ArchLinuxPkgStatsScraper | app/Main.hs | Haskell | bsd-3-clause | 628 |
module WeightForWeight where
import Data.Char (digitToInt)
import Data.List (sortBy)
import Data.Ord (comparing)
-- | Sort numbers by the sum of their digits (5 kyu)
-- | Link: https://biturl.io/SortWeight
-- | Refactored solution I came up with after completing this kata
-- originally I compared the number and digSum separately (see Lesson learned)
orderWeight :: String -> String
orderWeight xs = unwords $ sortBy (comparing digSum) (words xs)
where
digSum x = (sum $ map digitToInt x, x)
-- | Lesson learned: When comparing tuples, x1 and x2 are compared first
-- and then if they are equal, y1 and y2 are compared second etc.
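-- A hedged usage note (not part of the original solution):
--   orderWeight "56 65 74 100 99 68 86 180 90"
--     == "100 180 90 56 65 74 68 86 99"
-- "100" sorts first (digit sum 1); "180" precedes "90" because ties on the
-- digit sum fall back to ordinary string comparison.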
| Eugleo/Code-Wars | src/number-kata/WeightForWeight.hs | Haskell | bsd-3-clause | 646 |
import Graphics.Rendering.Chart.Easy
import Graphics.Rendering.Chart.Backend.Cairo
import Data.Time.LocalTime
import Test.Examples.Prices(prices,mkDate,filterPrices)
prices' :: [(LocalTime,Double,Double)]
prices' = filterPrices prices (mkDate 1 1 2005) (mkDate 31 12 2006)
main = toFile def "example2_big.png" $ do
layoutlr_title .= "Price History"
layoutlr_left_axis . laxis_override .= axisGridHide
layoutlr_right_axis . laxis_override .= axisGridHide
plotLeft (line "price 1" [ [ (d,v) | (d,v,_) <- prices'] ])
plotRight (line "price 2" [ [ (d,v) | (d,_,v) <- prices'] ])
{-
import System.Environment(getArgs)
import Data.Colour.Names
import Data.Colour
import Control.Lens
import Data.Default.Class
import Data.Time.LocalTime
import Graphics.Rendering.Chart
import Graphics.Rendering.Chart.Backend.Cairo
import Prices(prices,mkDate,filterPrices)
prices' :: [(LocalTime,Double,Double)]
prices' = filterPrices prices (mkDate 1 1 2005) (mkDate 31 12 2006)
chart = toRenderable layout
where
price1 = plot_lines_style . line_color .~ opaque blue
$ plot_lines_values .~ [ [ (d,v) | (d,v,_) <- prices'] ]
$ plot_lines_title .~ "price 1"
$ def
price2 = plot_lines_style . line_color .~ opaque green
$ plot_lines_values .~ [ [ (d,v) | (d,_,v) <- prices'] ]
$ plot_lines_title .~ "price 2"
$ def
layout = layoutlr_title .~"Price History"
$ layoutlr_left_axis . laxis_override .~ axisGridHide
$ layoutlr_right_axis . laxis_override .~ axisGridHide
$ layoutlr_x_axis . laxis_override .~ axisGridHide
$ layoutlr_plots .~ [Left (toPlot price1),
Right (toPlot price2)]
$ layoutlr_grid_last .~ False
$ def
main = renderableToFile def "example2_big.png" chart
-}
| visood/bioalgo | test/Test/Examples/Plots/priceHistory.hs | Haskell | bsd-3-clause | 1,859 |
{-# LANGUAGE RebindableSyntax #-}
-- Copyright : (C) 2009 Corey O'Connor
-- License : BSD-style (see the file LICENSE)
import Bind.Marshal.Prelude
import Bind.Marshal.Verify
import Bind.Marshal.StdLib.Dynamic.ByteString.Lazy.Des
main = run_test $ do
returnM () :: Test ()
| coreyoconnor/bind-marshal | test/verify_stdlib_dynamic_bytestring_lazy_des.hs | Haskell | bsd-3-clause | 288 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeOperators #-}
module Server where
import Control.Monad.IO.Class
import Control.Monad.Reader
import Control.Monad.Trans.Either
import Data.Aeson
import Data.Aeson.Types
import Data.Attoparsec.ByteString
import Data.ByteString (ByteString)
import Data.Int
import Data.List
import Data.String.Conversions
import Data.Time.Calendar
import GHC.Generics
import Lucid
import Servant.HTML.Lucid(HTML)
import Network.HTTP.Media ((//), (/:))
import Network.Wai
import Network.Wai.Handler.Warp
import Servant
import System.Directory
import Text.Blaze
import Text.Blaze.Html.Renderer.Utf8
import qualified Data.Aeson.Parser
import qualified Text.Blaze.Html
main :: IO ()
main = run 8081 app
-- 'serve' comes from servant and hands you a WAI Application,
-- which you can think of as an "abstract" web application,
-- not yet a webserver.
app :: Application
app = serve (Proxy :: Proxy (UserAPI :<|> PersonAPI)) (server :<|> server4)
type API = UserAPI :<|> PersonAPI
type UserAPI = "users" :> Get '[JSON] [User]
:<|> "albert" :> Get '[JSON] User
:<|> "isaac" :> Get '[JSON] User
type PersonAPI = "persons" :> Get '[JSON, HTML] [Person]
--------------
-------------
data User = User
{ name :: String
, age :: Int
, email :: String
-- , registration_date :: Day
} deriving (Eq, Show, Generic)
instance ToJSON User
isaac :: User
isaac = User "Isaac Newton" 372 "isaac@newton.co.uk" -- (fromGregorian 1683 3 1)
albert :: User
albert = User "Albert Einstein" 136 "ae@mc2.org" -- (fromGregorian 1905 12 1)
users :: [User]
users = [isaac, albert]
server :: Server UserAPI
server = return users
:<|> return albert
:<|> return isaac
server4 = return persons
userAPI :: Proxy UserAPI
userAPI = Proxy
data Person = Person
{ firstName :: String
, lastName :: String
} deriving Generic -- for the JSON instance
instance ToJSON Person
-- HTML serialization of a single person
instance ToHtml Person where
toHtml person =
tr_ $ do
td_ (toHtml $ firstName person)
td_ (toHtml $ lastName person)
-- do not worry too much about this
toHtmlRaw = toHtml
-- HTML serialization of a list of persons
instance ToHtml [Person] where
toHtml persons = table_ $ do
tr_ $ do
th_ "first name"
th_ "last name"
-- this just calls toHtml on each person of the list
-- and concatenates the resulting pieces of HTML together
foldMap toHtml persons
toHtmlRaw = toHtml
persons :: [Person]
persons =
[ Person "Isaac" "Newton"
, Person "Albert" "Einstein"
]
personAPI :: Proxy PersonAPI
personAPI = Proxy
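-- A hedged usage note (not part of the original file): with the server running
-- (main listens on port 8081), the endpoints can be exercised with e.g.
--   curl http://localhost:8081/users     -- JSON list of the two users
--   curl http://localhost:8081/persons   -- persons as JSON, or as an HTML
--                                        -- table when requested via Accept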
| odr/pers | app/Server.hs | Haskell | bsd-3-clause | 2,863 |
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE RecordWildCards #-}
module IOCP.Worker (
Worker,
new,
enqueue,
-- * Helpers
forkOSUnmasked,
) where
import Control.Concurrent
import Control.Exception (mask_)
import Control.Monad (forever, void)
import Data.IORef
import GHC.IO (unsafeUnmask)
data Worker = Worker
{ workerJobs :: !(IORef JobList)
, workerWake :: !(MVar ())
}
instance Eq Worker where
Worker a _ == Worker b _ = a == b
-- | Fork an OS thread, and return a handle for sending jobs to it.
new :: IO Worker
new = do
workerJobs <- newIORef id
workerWake <- newEmptyMVar
_ <- forkOSUnmasked $ forever $ do
takeMVar workerWake
jobs <- atomicModifyIORef workerJobs $ \jobs -> (id, jobs)
runJobList jobs
return Worker{..}
-- | Add a job to the work queue. Jobs are executed in the order they are
-- queued, and every job is run in the same OS thread.
--
-- A job should not block for long or throw an exception, as this will prevent
-- future jobs from running.
--
-- Exception safety: atomic, non-interruptible
enqueue :: Worker -> IO () -> IO ()
enqueue Worker{..} io =
mask_ $ do
!() <- atomicModifyIORef workerJobs $ \jobs -> (snocJobList jobs io, ())
void $ tryPutMVar workerWake ()
------------------------------------------------------------------------
-- Helpers
forkOSUnmasked :: IO () -> IO ThreadId
forkOSUnmasked = forkOS . unsafeUnmask
-- A difference list, but with (x >>) instead of (x :)
type JobList = IO () -> IO ()
-- | Append an action to the job list, so it will
-- run /after/ the existing actions.
snocJobList :: JobList -> IO () -> JobList
snocJobList dl io = dl . (io >>)
runJobList :: JobList -> IO ()
runJobList dl = dl (return ())
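-- A hedged illustration (not part of the original module): the difference-list
-- encoding keeps jobs in FIFO order, e.g.
--   runJobList (snocJobList (snocJobList id a) b)  ==  a >> (b >> return ())
-- so 'a' runs before 'b', matching the ordering promised by 'enqueue'.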
| joeyadams/haskell-iocp | IOCP/Worker.hs | Haskell | bsd-3-clause | 1,794 |
module Graphomania.Builder.FromVertices
(
) where
-- TODO:
-- - Graph: a fold over the vertices.
-- - Vertex: external ID; tag (unused); a fold over its edges.
-- - Edge: ID of the vertex it points to, plus a tag.
-- The vertex ID is an internal identifier, but it refers to a vertex of the source graph.
-- 1. Do not use external vertex IDs here;
--    we will store them as tags.
-- 2. Internal vertex identifiers change, so as a consequence we build
--    two maps: source ID -> target ID, target ID -> source ID.
| schernichkin/BSPM | graphomania/src/Graphomania/Builder/FromVertices.hs | Haskell | bsd-3-clause | 829 |
-- | Some auxiliary crypto types
module UTxO.Crypto (
-- * Key-pairs
RegularKeyPair(..)
, EncKeyPair(..)
, RedeemKeyPair(..)
, regularKeyPair
, encKeyPair
, encToRegular
-- * Abstract API
, SomeKeyPair(..)
, TxOwnedInput
, ClassifiedInputs(..)
, classifyInputs
-- * Delegation
, DelegatedTo(..)
) where
import Formatting (bprint, build, (%))
import qualified Formatting.Buildable
import Universum
import Pos.Chain.Delegation (ProxySKHeavy)
import Pos.Chain.Txp (TxIn)
import Pos.Core
import Pos.Crypto
{-------------------------------------------------------------------------------
Keypairs
-------------------------------------------------------------------------------}
data RegularKeyPair = RegularKeyPair {
regKpSec :: SecretKey
, regKpPub :: PublicKey
, regKpHash :: AddressHash PublicKey
}
deriving (Show)
data EncKeyPair = EncKeyPair {
encKpEnc :: EncryptedSecretKey
, encKpSec :: SecretKey
, encKpPub :: PublicKey
, encKpHash :: AddressHash PublicKey
}
deriving (Show)
data RedeemKeyPair = RedeemKeyPair {
redKpSec :: RedeemSecretKey
, redKpPub :: RedeemPublicKey
}
deriving (Show)
regularKeyPair :: SecretKey -> RegularKeyPair
regularKeyPair regKpSec = RegularKeyPair {..}
where
regKpPub = toPublic regKpSec
regKpHash = addressHash regKpPub
encKeyPair :: EncryptedSecretKey -> EncKeyPair
encKeyPair encKpEnc = EncKeyPair {..}
where
encKpSec = encToSecret encKpEnc
encKpPub = encToPublic encKpEnc
encKpHash = addressHash encKpPub
encToRegular :: EncKeyPair -> RegularKeyPair
encToRegular EncKeyPair{..} = RegularKeyPair{..}
where
regKpSec = encKpSec
regKpPub = encKpPub
regKpHash = encKpHash
{-------------------------------------------------------------------------------
Abstract over the various kinds of keypairs we have
-------------------------------------------------------------------------------}
data SomeKeyPair =
KeyPairRegular RegularKeyPair
| KeyPairEncrypted EncKeyPair
| KeyPairRedeem RedeemKeyPair
-- | An input to a transaction together with evidence that it's yours
--
-- (This is the singular form of 'TxOwnedInputs', which is defined in the
-- Cardano core libraries.)
type TxOwnedInput a = (a, TxIn)
data ClassifiedInputs =
-- | This is a regular set of inputs
InputsRegular [TxOwnedInput RegularKeyPair]
-- | When redeeming from an AVVM address, we can only have a single input
| InputsRedeem (TxOwnedInput RedeemKeyPair)
-- | Classify a set of inputs
--
-- Returns an error message if the transaction contains an invalid
-- combination of inputs.
classifyInputs :: [TxOwnedInput SomeKeyPair] -> Either Text ClassifiedInputs
classifyInputs = \case
[] -> Left "No inputs"
(i:is) -> case classifyInput i of
Left i' -> go (InputsRegular [i']) is
Right i' -> go (InputsRedeem i' ) is
where
go :: ClassifiedInputs -> [TxOwnedInput SomeKeyPair] -> Either Text ClassifiedInputs
go acc [] = Right $ reverseAcc acc
go (InputsRedeem _) (_:_) = Left "Can only have a single redemption input"
go (InputsRegular acc) (i:is) =
case classifyInput i of
Left i' -> go (InputsRegular (i':acc)) is
Right _ -> Left "Cannot mix redemption inputs with other inputs"
classifyInput :: TxOwnedInput SomeKeyPair
-> Either (TxOwnedInput RegularKeyPair)
(TxOwnedInput RedeemKeyPair)
classifyInput (KeyPairRegular kp, inp) = Left (kp, inp)
classifyInput (KeyPairEncrypted kp, inp) = Left (encToRegular kp, inp)
classifyInput (KeyPairRedeem kp, inp) = Right (kp, inp)
reverseAcc :: ClassifiedInputs -> ClassifiedInputs
reverseAcc (InputsRegular inps) = InputsRegular $ reverse inps
reverseAcc (InputsRedeem inp) = InputsRedeem $ inp
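-- A hedged usage note (not part of the original module):
--   * classifyInputs []  ==  Left "No inputs"
--   * a redemption input followed by any further input is rejected with
--     "Can only have a single redemption input"
--   * a regular input followed by a redemption input is rejected with
--     "Cannot mix redemption inputs with other inputs"
--   * encrypted key pairs are treated as regular inputs (via encToRegular)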
{-------------------------------------------------------------------------------
Delegation
-------------------------------------------------------------------------------}
data DelegatedTo a = DelegatedTo {
delTo :: a
, delPSK :: ProxySKHeavy
}
deriving (Show)
{-------------------------------------------------------------------------------
Pretty-printing
-------------------------------------------------------------------------------}
instance Buildable RegularKeyPair where
build RegularKeyPair{..} = bprint
( "RegularKeyPair"
% "{ sec: " % build
% ", pub: " % build
% ", hash: " % build
% "}"
)
regKpSec
regKpPub
regKpHash
instance Buildable EncKeyPair where
build EncKeyPair{..} = bprint
( "EncKeyPair"
% "{ sec: " % build
% ", pub: " % build
% ", hash: " % build
% "}"
)
encKpSec
encKpPub
encKpHash
instance Buildable RedeemKeyPair where
build RedeemKeyPair{..} = bprint
( "RedeemKeyPair"
% "{ sec: " % build
% ", pub: " % build
% "}"
)
redKpSec
redKpPub
instance Buildable a => Buildable (DelegatedTo a) where
build DelegatedTo{..} = bprint
( "DelegatedTo"
% "{ to: " % build
% ", psk: " % build
% "}"
)
delTo
delPSK
| input-output-hk/cardano-sl | utxo/src/UTxO/Crypto.hs | Haskell | apache-2.0 | 5,350 |
import Application (appMain)
import Prelude (IO)
main :: IO ()
main = appMain
| sulami/hGM | app/main.hs | Haskell | bsd-3-clause | 103 |
-- (c) The University of Glasgow 2006
--
-- FamInstEnv: Type checked family instance declarations
{-# LANGUAGE CPP, GADTs, ScopedTypeVariables #-}
module FamInstEnv (
FamInst(..), FamFlavor(..), famInstAxiom, famInstTyCon, famInstRHS,
famInstsRepTyCons, famInstRepTyCon_maybe, dataFamInstRepTyCon,
pprFamInst, pprFamInsts,
mkImportedFamInst,
FamInstEnvs, FamInstEnv, emptyFamInstEnv, emptyFamInstEnvs,
extendFamInstEnv, deleteFromFamInstEnv, extendFamInstEnvList,
identicalFamInstHead, famInstEnvElts, familyInstances,
-- * CoAxioms
mkCoAxBranch, mkBranchedCoAxiom, mkUnbranchedCoAxiom, mkSingleCoAxiom,
mkNewTypeCoAxiom,
FamInstMatch(..),
lookupFamInstEnv, lookupFamInstEnvConflicts, lookupFamInstEnvByTyCon,
isDominatedBy, apartnessCheck,
-- Injectivity
InjectivityCheckResult(..),
lookupFamInstEnvInjectivityConflicts, injectiveBranches,
-- Normalisation
topNormaliseType, topNormaliseType_maybe,
normaliseType, normaliseTcApp,
reduceTyFamApp_maybe,
-- Flattening
flattenTys
) where
#include "HsVersions.h"
import Unify
import Type
import TyCoRep
import TyCon
import Coercion
import CoAxiom
import VarSet
import VarEnv
import Name
import PrelNames ( eqPrimTyConKey )
import UniqFM
import Outputable
import Maybes
import TrieMap
import Unique
import Util
import Var
import Pair
import SrcLoc
import FastString
import MonadUtils
import Control.Monad
import Data.Function ( on )
import Data.List( mapAccumL )
{-
************************************************************************
* *
Type checked family instance heads
* *
************************************************************************
Note [FamInsts and CoAxioms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* CoAxioms and FamInsts are just like
DFunIds and ClsInsts
* A CoAxiom is a System-FC thing: it can relate any two types
* A FamInst is a Haskell source-language thing, corresponding
to a type/data family instance declaration.
- The FamInst contains a CoAxiom, which is the evidence
for the instance
- The LHS of the CoAxiom is always of form F ty1 .. tyn
where F is a type family
-}
data FamInst -- See Note [FamInsts and CoAxioms]
= FamInst { fi_axiom :: CoAxiom Unbranched -- The new coercion axiom
-- introduced by this family
-- instance
-- INVARIANT: apart from freshening (see below)
-- fi_tvs = cab_tvs of the (single) axiom branch
-- fi_cvs = cab_cvs ...ditto...
-- fi_tys = cab_lhs ...ditto...
-- fi_rhs = cab_rhs ...ditto...
, fi_flavor :: FamFlavor
-- Everything below here is a redundant,
-- cached version of the two things above
-- except that the TyVars are freshened
, fi_fam :: Name -- Family name
-- Used for "rough matching"; same idea as for class instances
-- See Note [Rough-match field] in InstEnv
, fi_tcs :: [Maybe Name] -- Top of type args
-- INVARIANT: fi_tcs = roughMatchTcs fi_tys
-- Used for "proper matching"; ditto
, fi_tvs :: [TyVar] -- Template tyvars for full match
, fi_cvs :: [CoVar] -- Template covars for full match
-- Like ClsInsts, these variables are always fresh
-- See Note [Template tyvars are fresh] in InstEnv
, fi_tys :: [Type] -- The LHS type patterns
-- May be eta-reduced; see Note [Eta reduction for data families]
, fi_rhs :: Type -- the RHS, with its freshened vars
}
data FamFlavor
= SynFamilyInst -- A synonym family
| DataFamilyInst TyCon -- A data family, with its representation TyCon
{- Note [Eta reduction for data families]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
data family T a b :: *
newtype instance T Int a = MkT (IO a) deriving( Monad )
We'd like this to work.
From the 'newtype instance' you might think we'd get:
newtype TInt a = MkT (IO a)
axiom ax1 a :: T Int a ~ TInt a -- The newtype-instance part
axiom ax2 a :: TInt a ~ IO a -- The newtype part
But now what can we do? We have this problem
Given: d :: Monad IO
Wanted: d' :: Monad (T Int) = d |> ????
What coercion can we use for the ???
Solution: eta-reduce both axioms, thus:
axiom ax1 :: T Int ~ TInt
axiom ax2 :: TInt ~ IO
Now
d' = d |> Monad (sym (ax2 ; ax1))
This eta reduction happens for data instances as well as newtype
instances. Here we want to eta-reduce the data family axiom.
All this is done in TcInstDcls.tcDataFamInstDecl.
See also Note [Newtype eta] in TyCon.
Bottom line:
For a FamInst with fi_flavour = DataFamilyInst rep_tc,
- fi_tvs may be shorter than tyConTyVars of rep_tc
- fi_tys may be shorter than tyConArity of the family tycon
i.e. LHS is unsaturated
- fi_rhs will be (rep_tc fi_tvs)
i.e. RHS is un-saturated
But when fi_flavour = SynFamilyInst,
- fi_tys has the exact arity of the family tycon
-}
-- Obtain the axiom of a family instance
famInstAxiom :: FamInst -> CoAxiom Unbranched
famInstAxiom = fi_axiom
-- Split the left-hand side of the FamInst
famInstSplitLHS :: FamInst -> (TyCon, [Type])
famInstSplitLHS (FamInst { fi_axiom = axiom, fi_tys = lhs })
= (coAxiomTyCon axiom, lhs)
-- Get the RHS of the FamInst
famInstRHS :: FamInst -> Type
famInstRHS = fi_rhs
-- Get the family TyCon of the FamInst
famInstTyCon :: FamInst -> TyCon
famInstTyCon = coAxiomTyCon . famInstAxiom
-- Return the representation TyCons introduced by data family instances, if any
famInstsRepTyCons :: [FamInst] -> [TyCon]
famInstsRepTyCons fis = [tc | FamInst { fi_flavor = DataFamilyInst tc } <- fis]
-- Extracts the TyCon for this *data* (or newtype) instance
famInstRepTyCon_maybe :: FamInst -> Maybe TyCon
famInstRepTyCon_maybe fi
= case fi_flavor fi of
DataFamilyInst tycon -> Just tycon
SynFamilyInst -> Nothing
dataFamInstRepTyCon :: FamInst -> TyCon
dataFamInstRepTyCon fi
= case fi_flavor fi of
DataFamilyInst tycon -> tycon
SynFamilyInst -> pprPanic "dataFamInstRepTyCon" (ppr fi)
{-
************************************************************************
* *
Pretty printing
* *
************************************************************************
-}
instance NamedThing FamInst where
getName = coAxiomName . fi_axiom
instance Outputable FamInst where
ppr = pprFamInst
-- Prints the FamInst as a family instance declaration
-- NB: FamInstEnv.pprFamInst is used only for internal, debug printing
-- See pprTyThing.pprFamInst for printing for the user
pprFamInst :: FamInst -> SDoc
pprFamInst famInst
= hang (pprFamInstHdr famInst) 2 (ifPprDebug debug_stuff)
where
ax = fi_axiom famInst
debug_stuff = vcat [ text "Coercion axiom:" <+> ppr ax
, text "Tvs:" <+> ppr (fi_tvs famInst)
, text "LHS:" <+> ppr (fi_tys famInst)
, text "RHS:" <+> ppr (fi_rhs famInst) ]
pprFamInstHdr :: FamInst -> SDoc
pprFamInstHdr fi@(FamInst {fi_flavor = flavor})
= pprTyConSort <+> pp_instance <+> pp_head
where
-- For *associated* types, say "type T Int = blah"
-- For *top level* type instances, say "type instance T Int = blah"
pp_instance
| isTyConAssoc fam_tc = empty
| otherwise = text "instance"
(fam_tc, etad_lhs_tys) = famInstSplitLHS fi
vanilla_pp_head = pprTypeApp fam_tc etad_lhs_tys
pp_head | DataFamilyInst rep_tc <- flavor
, isAlgTyCon rep_tc
, let extra_tvs = dropList etad_lhs_tys (tyConTyVars rep_tc)
, not (null extra_tvs)
= getPprStyle $ \ sty ->
if debugStyle sty
then vanilla_pp_head -- With -dppr-debug just show it as-is
else pprTypeApp fam_tc (etad_lhs_tys ++ mkTyVarTys extra_tvs)
-- Without -dppr-debug, eta-expand
-- See Trac #8674
-- (This is probably over the top now that we use this
-- only for internal debug printing; PprTyThing.pprFamInst
-- is used for user-level printing.)
| otherwise
= vanilla_pp_head
pprTyConSort = case flavor of
SynFamilyInst -> text "type"
DataFamilyInst tycon
| isDataTyCon tycon -> text "data"
| isNewTyCon tycon -> text "newtype"
| isAbstractTyCon tycon -> text "data"
| otherwise -> text "WEIRD" <+> ppr tycon
pprFamInsts :: [FamInst] -> SDoc
pprFamInsts finsts = vcat (map pprFamInst finsts)
{-
Note [Lazy axiom match]
~~~~~~~~~~~~~~~~~~~~~~~
It is Vitally Important that mkImportedFamInst is *lazy* in its axiom
parameter. The axiom is loaded lazily, via a forkM, in TcIface. Sometime
later, mkImportedFamInst is called using that axiom. However, the axiom
may itself depend on entities which are not yet loaded as of the time
of the mkImportedFamInst. Thus, if mkImportedFamInst eagerly looks at the
axiom, a dependency loop spontaneously appears and GHC hangs. The solution
is simply for mkImportedFamInst never, ever to look inside of the axiom
until everything else is good and ready to do so. We can assume that this
readiness has been achieved when some other code pulls on the axiom in the
FamInst. Thus, we pattern match on the axiom lazily (in the where clause,
not in the parameter list) and we assert the consistency of names there
also.
-}
-- Make a family instance representation from the information found in an
-- interface file. In particular, we get the rough match info from the iface
-- (instead of computing it here).
mkImportedFamInst :: Name -- Name of the family
-> [Maybe Name] -- Rough match info
-> CoAxiom Unbranched -- Axiom introduced
-> FamInst -- Resulting family instance
mkImportedFamInst fam mb_tcs axiom
= FamInst {
fi_fam = fam,
fi_tcs = mb_tcs,
fi_tvs = tvs,
fi_cvs = cvs,
fi_tys = tys,
fi_rhs = rhs,
fi_axiom = axiom,
fi_flavor = flavor }
where
-- See Note [Lazy axiom match]
~(CoAxBranch { cab_lhs = tys
, cab_tvs = tvs
, cab_cvs = cvs
, cab_rhs = rhs }) = coAxiomSingleBranch axiom
-- Derive the flavor for an imported FamInst rather disgustingly
-- Maybe we should store it in the IfaceFamInst?
flavor = case splitTyConApp_maybe rhs of
Just (tc, _)
| Just ax' <- tyConFamilyCoercion_maybe tc
, ax' == axiom
-> DataFamilyInst tc
_ -> SynFamilyInst
{-
************************************************************************
* *
FamInstEnv
* *
************************************************************************
Note [FamInstEnv]
~~~~~~~~~~~~~~~~~
A FamInstEnv maps a family name to the list of known instances for that family.
The same FamInstEnv includes both 'data family' and 'type family' instances.
Type families are reduced during type inference, but not data families;
the user explains when to use a data family instance by using constructors
and pattern matching.
Nevertheless it is still useful to have data families in the FamInstEnv:
- For finding overlaps and conflicts
- For finding the representation type...see FamInstEnv.topNormaliseType
and its call site in Simplify
- In standalone deriving instance Eq (T [Int]) we need to find the
representation type for T [Int]
Note [Varying number of patterns for data family axioms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For data families, the number of patterns may vary between instances.
For example
data family T a b
data instance T Int a = T1 a | T2
data instance T Bool [a] = T3 a
Then we get a data type for each instance, and an axiom:
data TInt a = T1 a | T2
data TBoolList a = T3 a
axiom ax7 :: T Int ~ TInt -- Eta-reduced
axiom ax8 a :: T Bool [a] ~ TBoolList a
These two axioms for T, one with one pattern, one with two;
see Note [Eta reduction for data families]
-}
type FamInstEnv = UniqFM FamilyInstEnv -- Maps a family to its instances
-- See Note [FamInstEnv]
type FamInstEnvs = (FamInstEnv, FamInstEnv)
-- External package inst-env, Home-package inst-env
newtype FamilyInstEnv
= FamIE [FamInst] -- The instances for a particular family, in any order
instance Outputable FamilyInstEnv where
ppr (FamIE fs) = text "FamIE" <+> vcat (map ppr fs)
-- INVARIANTS:
-- * The fi_tvs are distinct in each FamInst
-- of a range value of the map (so we can safely unify them)
emptyFamInstEnvs :: (FamInstEnv, FamInstEnv)
emptyFamInstEnvs = (emptyFamInstEnv, emptyFamInstEnv)
emptyFamInstEnv :: FamInstEnv
emptyFamInstEnv = emptyUFM
famInstEnvElts :: FamInstEnv -> [FamInst]
famInstEnvElts fi = [elt | FamIE elts <- eltsUFM fi, elt <- elts]
familyInstances :: (FamInstEnv, FamInstEnv) -> TyCon -> [FamInst]
familyInstances (pkg_fie, home_fie) fam
= get home_fie ++ get pkg_fie
where
get env = case lookupUFM env fam of
Just (FamIE insts) -> insts
Nothing -> []
extendFamInstEnvList :: FamInstEnv -> [FamInst] -> FamInstEnv
extendFamInstEnvList inst_env fis = foldl extendFamInstEnv inst_env fis
extendFamInstEnv :: FamInstEnv -> FamInst -> FamInstEnv
extendFamInstEnv inst_env
ins_item@(FamInst {fi_fam = cls_nm})
= addToUFM_C add inst_env cls_nm (FamIE [ins_item])
where
add (FamIE items) _ = FamIE (ins_item:items)
deleteFromFamInstEnv :: FamInstEnv -> FamInst -> FamInstEnv
-- Used only for overriding in GHCi
deleteFromFamInstEnv inst_env fam_inst@(FamInst {fi_fam = fam_nm})
= adjustUFM adjust inst_env fam_nm
where
adjust :: FamilyInstEnv -> FamilyInstEnv
adjust (FamIE items)
= FamIE (filterOut (identicalFamInstHead fam_inst) items)
identicalFamInstHead :: FamInst -> FamInst -> Bool
-- ^ True when the LHSs are identical
-- Used for overriding in GHCi
identicalFamInstHead (FamInst { fi_axiom = ax1 }) (FamInst { fi_axiom = ax2 })
= coAxiomTyCon ax1 == coAxiomTyCon ax2
&& numBranches brs1 == numBranches brs2
&& and ((zipWith identical_branch `on` fromBranches) brs1 brs2)
where
brs1 = coAxiomBranches ax1
brs2 = coAxiomBranches ax2
identical_branch br1 br2
= isJust (tcMatchTys lhs1 lhs2)
&& isJust (tcMatchTys lhs2 lhs1)
where
lhs1 = coAxBranchLHS br1
lhs2 = coAxBranchLHS br2
{-
************************************************************************
* *
Compatibility
* *
************************************************************************
Note [Apartness]
~~~~~~~~~~~~~~~~
In dealing with closed type families, we must be able to check that one type
will never reduce to another. This check is called /apartness/. The check
is always between a target (which may be an arbitrary type) and a pattern.
Here is how we do it:
apart(target, pattern) = not (unify(flatten(target), pattern))
where flatten (implemented in flattenTys, below) converts all type-family
applications into fresh variables. (See Note [Flattening].)
Note [Compatibility]
~~~~~~~~~~~~~~~~~~~~
Two patterns are /compatible/ if either of the following conditions hold:
1) The patterns are apart.
2) The patterns unify with a substitution S, and their right hand sides
equal under that substitution.
For open type families, only compatible instances are allowed. For closed
type families, the story is slightly more complicated. Consider the following:
type family F a where
F Int = Bool
F a = Int
g :: Show a => a -> F a
g x = length (show x)
Should that type-check? No. We need to allow for the possibility that 'a'
might be Int and therefore 'F a' should be Bool. We can simplify 'F a' to Int
only when we can be sure that 'a' is not Int.
To achieve this, after finding a possible match within the equations, we have to
go back to all previous equations and check that, under the
substitution induced by the match, other branches are surely apart. (See
Note [Apartness].) This is similar to what happens with class
instance selection, when we need to guarantee that there is only a match and
no unifiers. The exact algorithm is different here because the
potentially-overlapping group is closed.
As another example, consider this:
type family G x where
G Int = Bool
G a = Double
type family H y
-- no instances
Now, we want to simplify (G (H Char)). We can't, because (H Char) might later
simplify to be Int. So, (G (H Char)) is stuck, for now.
While everything above is quite sound, it isn't as expressive as we'd like.
Consider this:
type family J a where
J Int = Int
J a = a
Can we simplify (J b) to b? Sure we can. Yes, the first equation matches if
b is instantiated with Int, but the RHSs coincide there, so it's all OK.
So, the rule is this: when looking up a branch in a closed type family, we
find a branch that matches the target, but then we make sure that the target
is apart from every previous *incompatible* branch. We don't check the
branches that are compatible with the matching branch, because they are either
irrelevant (clause 1 of compatible) or benign (clause 2 of compatible).
-}
-- See Note [Compatibility]
compatibleBranches :: CoAxBranch -> CoAxBranch -> Bool
compatibleBranches (CoAxBranch { cab_lhs = lhs1, cab_rhs = rhs1 })
(CoAxBranch { cab_lhs = lhs2, cab_rhs = rhs2 })
= case tcUnifyTysFG (const BindMe) lhs1 lhs2 of
SurelyApart -> True
Unifiable subst
| Type.substTy subst rhs1 `eqType` Type.substTy subst rhs2
-> True
_ -> False
-- | Result of testing two type family equations for injectivity.
data InjectivityCheckResult
= InjectivityAccepted
-- ^ Either RHSs are distinct or unification of RHSs leads to unification of
-- LHSs
| InjectivityUnified CoAxBranch CoAxBranch
-- ^ RHSs unify but LHSs don't unify under that substitution. Relevant for
-- closed type families where an equation after unification might be
-- overlapped (in which case it is OK if they don't unify). Constructor
-- stores axioms after unification.
-- | Check whether two type family axioms don't violate injectivity annotation.
injectiveBranches :: [Bool] -> CoAxBranch -> CoAxBranch
-> InjectivityCheckResult
injectiveBranches injectivity
ax1@(CoAxBranch { cab_lhs = lhs1, cab_rhs = rhs1 })
ax2@(CoAxBranch { cab_lhs = lhs2, cab_rhs = rhs2 })
-- See Note [Verifying injectivity annotation]. This function implements the first
-- check described there.
= let getInjArgs = filterByList injectivity
in case tcUnifyTyWithTFs True rhs1 rhs2 of -- True = two-way pre-unification
Nothing -> InjectivityAccepted -- RHS are different, so equations are
-- injective.
Just subst -> -- RHS unify under a substitution
let lhs1Subst = Type.substTys subst (getInjArgs lhs1)
lhs2Subst = Type.substTys subst (getInjArgs lhs2)
-- If LHSs are equal under the substitution used for RHSs then this pair
-- of equations does not violate injectivity annotation. If LHSs are not
-- equal under that substitution then this pair of equations violates
-- injectivity annotation, but for closed type families it still might
-- be the case that one LHS after substitution is unreachable.
in if eqTypes lhs1Subst lhs2Subst
then InjectivityAccepted
else InjectivityUnified ( ax1 { cab_lhs = Type.substTys subst lhs1
, cab_rhs = Type.substTy subst rhs1 })
( ax2 { cab_lhs = Type.substTys subst lhs2
, cab_rhs = Type.substTy subst rhs2 })
-- takes a CoAxiom with unknown branch incompatibilities and computes
-- the incompatibilities
-- See Note [Storing compatibility] in CoAxiom
computeAxiomIncomps :: [CoAxBranch] -> [CoAxBranch]
computeAxiomIncomps branches
= snd (mapAccumL go [] branches)
where
go :: [CoAxBranch] -> CoAxBranch -> ([CoAxBranch], CoAxBranch)
go prev_brs cur_br
= (cur_br : prev_brs, new_br)
where
new_br = cur_br { cab_incomps = mk_incomps prev_brs cur_br }
mk_incomps :: [CoAxBranch] -> CoAxBranch -> [CoAxBranch]
mk_incomps prev_brs cur_br
= filter (not . compatibleBranches cur_br) prev_brs
{-
************************************************************************
* *
Constructing axioms
These functions are here because tidyType / tcUnifyTysFG
are not available in CoAxiom
Also computeAxiomIncomps is too sophisticated for CoAxiom
* *
************************************************************************
Note [Tidy axioms when we build them]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We print out axioms and don't want to print stuff like
F k k a b = ...
Instead we must tidy those kind variables. See Trac #7524.
-}
-- all axiom roles are Nominal, as this is only used with type families
mkCoAxBranch :: [TyVar] -- original, possibly stale, tyvars
-> [CoVar] -- possibly stale covars
-> [Type] -- LHS patterns
-> Type -- RHS
-> [Role]
-> SrcSpan
-> CoAxBranch
mkCoAxBranch tvs cvs lhs rhs roles loc
= CoAxBranch { cab_tvs = tvs1
, cab_cvs = cvs1
, cab_lhs = tidyTypes env lhs
, cab_roles = roles
, cab_rhs = tidyType env rhs
, cab_loc = loc
, cab_incomps = placeHolderIncomps }
where
(env1, tvs1) = tidyTyCoVarBndrs emptyTidyEnv tvs
(env, cvs1) = tidyTyCoVarBndrs env1 cvs
-- See Note [Tidy axioms when we build them]
-- all of the following code is here to avoid mutual dependencies with
-- Coercion
mkBranchedCoAxiom :: Name -> TyCon -> [CoAxBranch] -> CoAxiom Branched
mkBranchedCoAxiom ax_name fam_tc branches
= CoAxiom { co_ax_unique = nameUnique ax_name
, co_ax_name = ax_name
, co_ax_tc = fam_tc
, co_ax_role = Nominal
, co_ax_implicit = False
, co_ax_branches = manyBranches (computeAxiomIncomps branches) }
mkUnbranchedCoAxiom :: Name -> TyCon -> CoAxBranch -> CoAxiom Unbranched
mkUnbranchedCoAxiom ax_name fam_tc branch
= CoAxiom { co_ax_unique = nameUnique ax_name
, co_ax_name = ax_name
, co_ax_tc = fam_tc
, co_ax_role = Nominal
, co_ax_implicit = False
, co_ax_branches = unbranched (branch { cab_incomps = [] }) }
mkSingleCoAxiom :: Role -> Name
-> [TyVar] -> [CoVar] -> TyCon -> [Type] -> Type
-> CoAxiom Unbranched
-- Make a single-branch CoAxiom, including making the branch itself
-- Used for both type family (Nominal) and data family (Representational)
-- axioms, hence passing in the Role
mkSingleCoAxiom role ax_name tvs cvs fam_tc lhs_tys rhs_ty
= CoAxiom { co_ax_unique = nameUnique ax_name
, co_ax_name = ax_name
, co_ax_tc = fam_tc
, co_ax_role = role
, co_ax_implicit = False
, co_ax_branches = unbranched (branch { cab_incomps = [] }) }
where
branch = mkCoAxBranch tvs cvs lhs_tys rhs_ty
(map (const Nominal) tvs)
(getSrcSpan ax_name)
-- | Create a coercion constructor (axiom) suitable for the given
-- newtype 'TyCon'. The 'Name' should be that of a new coercion
-- 'CoAxiom', the 'TyVar's the arguments expected by the @newtype@ and
-- the type the appropriate right hand side of the @newtype@, with
-- the free variables a subset of those 'TyVar's.
mkNewTypeCoAxiom :: Name -> TyCon -> [TyVar] -> [Role] -> Type -> CoAxiom Unbranched
mkNewTypeCoAxiom name tycon tvs roles rhs_ty
= CoAxiom { co_ax_unique = nameUnique name
, co_ax_name = name
, co_ax_implicit = True -- See Note [Implicit axioms] in TyCon
, co_ax_role = Representational
, co_ax_tc = tycon
, co_ax_branches = unbranched (branch { cab_incomps = [] }) }
where
branch = mkCoAxBranch tvs [] (mkTyVarTys tvs) rhs_ty
roles (getSrcSpan name)
{-
************************************************************************
* *
Looking up a family instance
* *
************************************************************************
@lookupFamInstEnv@ looks up in a @FamInstEnv@, using a one-way match.
Multiple matches are only possible in case of type families (not data
families), and then, it doesn't matter which match we choose (as the
instances are guaranteed confluent).
We return the matching family instances and the type instance at which it
matches. For example, if we lookup 'T [Int]' and have a family instance
data instance T [a] = ..
desugared to
data :R42T a = ..
coe :Co:R42T a :: T [a] ~ :R42T a
we return the matching instance '(FamInst{.., fi_tycon = :R42T}, Int)'.
-}
-- when matching a type family application, we get a FamInst,
-- and the list of types the axiom should be applied to
data FamInstMatch = FamInstMatch { fim_instance :: FamInst
, fim_tys :: [Type]
, fim_cos :: [Coercion]
}
-- See Note [Over-saturated matches]
instance Outputable FamInstMatch where
ppr (FamInstMatch { fim_instance = inst
, fim_tys = tys
, fim_cos = cos })
= text "match with" <+> parens (ppr inst) <+> ppr tys <+> ppr cos
lookupFamInstEnvByTyCon :: FamInstEnvs -> TyCon -> [FamInst]
lookupFamInstEnvByTyCon (pkg_ie, home_ie) fam_tc
= get pkg_ie ++ get home_ie
where
get ie = case lookupUFM ie fam_tc of
Nothing -> []
Just (FamIE fis) -> fis
lookupFamInstEnv
:: FamInstEnvs
-> TyCon -> [Type] -- What we are looking for
-> [FamInstMatch] -- Successful matches
-- Precondition: the tycon is saturated (or over-saturated)
lookupFamInstEnv
= lookup_fam_inst_env match
where
match _ _ tpl_tys tys = tcMatchTys tpl_tys tys
lookupFamInstEnvConflicts
:: FamInstEnvs
-> FamInst -- Putative new instance
-> [FamInstMatch] -- Conflicting matches (don't look at the fim_tys field)
-- E.g. when we are about to add
-- f : type instance F [a] = a->a
-- we do (lookupFamInstConflicts f [b])
-- to find conflicting matches
--
-- Precondition: the tycon is saturated (or over-saturated)
lookupFamInstEnvConflicts envs fam_inst@(FamInst { fi_axiom = new_axiom })
= lookup_fam_inst_env my_unify envs fam tys
where
(fam, tys) = famInstSplitLHS fam_inst
-- In example above, fam tys' = F [b]
my_unify (FamInst { fi_axiom = old_axiom }) tpl_tvs tpl_tys _
= ASSERT2( tyCoVarsOfTypes tys `disjointVarSet` tpl_tvs,
(ppr fam <+> ppr tys) $$
(ppr tpl_tvs <+> ppr tpl_tys) )
-- Unification will break badly if the variables overlap
-- They shouldn't because we allocate separate uniques for them
if compatibleBranches (coAxiomSingleBranch old_axiom) new_branch
then Nothing
else Just noSubst
-- Note [Family instance overlap conflicts]
noSubst = panic "lookupFamInstEnvConflicts noSubst"
new_branch = coAxiomSingleBranch new_axiom
--------------------------------------------------------------------------------
-- Type family injectivity checking bits --
--------------------------------------------------------------------------------
{- Note [Verifying injectivity annotation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Injectivity means that the RHS of a type family uniquely determines the LHS (see
Note [Type inference for type families with injectivity]). User informs about
injectivity using an injectivity annotation and it is GHC's task to verify that
that annotation is correct with respect to the type family equations. Whenever we see a new
equation of a type family we need to make sure that adding this equation to
already known equations of a type family does not violate injectivity annotation
supplied by the user (see Note [Injectivity annotation]). Of course if the type
family has no injectivity annotation then no check is required. But if a type
family has injectivity annotation we need to make sure that the following
conditions hold:
1. For each pair of *different* equations of a type family, one of the following
conditions holds:
A: RHSs are different.
B1: OPEN TYPE FAMILIES: If the RHSs can be unified under some substitution
then it must be possible to unify the LHSs under the same substitution.
Example:
type family FunnyId a = r | r -> a
type instance FunnyId Int = Int
type instance FunnyId a = a
RHSs of these two equations unify under [ a |-> Int ] substitution.
Under this substitution LHSs are equal therefore these equations don't
violate injectivity annotation.
B2: CLOSED TYPE FAMILIES: If the RHSs can be unified under some
substitution then either the LHSs unify under the same substitution or
the LHS of the latter equation is overlapped by earlier equations.
Example 1:
type family SwapIntChar a = r | r -> a where
SwapIntChar Int = Char
SwapIntChar Char = Int
SwapIntChar a = a
Say we are checking the last two equations. RHSs unify under [ a |->
Int ] substitution but LHSs don't. So we apply the substitution to LHS
of last equation and check whether it is overlapped by any of previous
equations. Since it is overlapped by the first equation we conclude
that pair of last two equations does not violate injectivity
annotation.
A special case of B is when the RHSs unify with an empty substitution, i.e. they
are identical.
If any of the above two conditions holds we conclude that the pair of
equations does not violate injectivity annotation. But if we find a pair
of equations where neither of the above holds we report that this pair
violates injectivity annotation because for a given RHS we don't have a
unique LHS. (Note that (B) actually implies (A).)
Note that we only take into account those LHS patterns that were declared
as injective.
2. If a RHS of a type family equation is a bare type variable then
all LHS variables (including implicit kind variables) also have to be bare.
In other words, this has to be a sole equation of that type family and it has
to cover all possible patterns. So for example this definition will be
rejected:
type family W1 a = r | r -> a
type instance W1 [a] = a
If it were accepted we could call `W1 [W1 Int]`, which would reduce to
`W1 Int` and then by injectivity we could conclude that `[W1 Int] ~ Int`,
which is bogus.
3. If a RHS of a type family equation is a type family application then the type
family is rejected as not injective.
4. If a LHS type variable that is declared as injective is not mentioned on
injective position in the RHS then the type family is rejected as not
injective. "Injective position" means either an argument to a type
constructor or argument to a type family on injective position.
See also Note [Injective type families] in TyCon
-}
-- | Check whether an open type family equation can be added to already existing
-- instance environment without causing conflicts with supplied injectivity
-- annotations. Returns list of conflicting axioms (type instance
-- declarations).
lookupFamInstEnvInjectivityConflicts
:: [Bool] -- injectivity annotation for this type family instance
-- INVARIANT: list contains at least one True value
-> FamInstEnvs -- all type instances seen so far
-> FamInst -- new type instance that we're checking
-> [CoAxBranch] -- conflicting instance declarations
lookupFamInstEnvInjectivityConflicts injList (pkg_ie, home_ie)
fam_inst@(FamInst { fi_axiom = new_axiom })
-- See Note [Verifying injectivity annotation]. This function implements
-- check (1.B1) for open type families described there.
= lookup_inj_fam_conflicts home_ie ++ lookup_inj_fam_conflicts pkg_ie
where
fam = famInstTyCon fam_inst
new_branch = coAxiomSingleBranch new_axiom
-- filtering function used by `lookup_inj_fam_conflicts` to check whether
-- a pair of equations conflicts with the injectivity annotation.
isInjConflict (FamInst { fi_axiom = old_axiom })
| InjectivityAccepted <-
injectiveBranches injList (coAxiomSingleBranch old_axiom) new_branch
= False -- no conflict
| otherwise = True
lookup_inj_fam_conflicts ie
| isOpenFamilyTyCon fam, Just (FamIE insts) <- lookupUFM ie fam
= map (coAxiomSingleBranch . fi_axiom) $
filter isInjConflict insts
| otherwise = []
--------------------------------------------------------------------------------
-- Type family overlap checking bits --
--------------------------------------------------------------------------------
{-
Note [Family instance overlap conflicts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- In the case of data family instances, any overlap is fundamentally a
conflict (as these instances imply injective type mappings).
- In the case of type family instances, overlap is admitted as long as
the right-hand sides of the overlapping rules coincide under the
overlap substitution. eg
type instance F a Int = a
type instance F Int b = b
These two overlap on (F Int Int) but then both RHSs are Int,
so all is well. We require that they are syntactically equal;
anything else would be difficult to test for at this stage.
-}
------------------------------------------------------------
-- Might be a one-way match or a unifier
type MatchFun = FamInst -- The FamInst template
-> TyVarSet -> [Type] -- fi_tvs, fi_tys of that FamInst
-> [Type] -- Target to match against
-> Maybe TCvSubst
lookup_fam_inst_env' -- The worker, local to this module
:: MatchFun
-> FamInstEnv
-> TyCon -> [Type] -- What we are looking for
-> [FamInstMatch]
lookup_fam_inst_env' match_fun ie fam match_tys
| isOpenFamilyTyCon fam
, Just (FamIE insts) <- lookupUFM ie fam
= find insts -- The common case
| otherwise = []
where
find [] = []
find (item@(FamInst { fi_tcs = mb_tcs, fi_tvs = tpl_tvs, fi_cvs = tpl_cvs
, fi_tys = tpl_tys }) : rest)
-- Fast check for no match, uses the "rough match" fields
| instanceCantMatch rough_tcs mb_tcs
= find rest
-- Proper check
| Just subst <- match_fun item (mkVarSet tpl_tvs) tpl_tys match_tys1
= (FamInstMatch { fim_instance = item
, fim_tys = substTyVars subst tpl_tvs `chkAppend` match_tys2
, fim_cos = ASSERT( all (isJust . lookupCoVar subst) tpl_cvs )
substCoVars subst tpl_cvs
})
: find rest
-- No match => try next
| otherwise
= find rest
where
(rough_tcs, match_tys1, match_tys2) = split_tys tpl_tys
-- Precondition: the tycon is saturated (or over-saturated)
-- Deal with over-saturation
-- See Note [Over-saturated matches]
split_tys tpl_tys
| isTypeFamilyTyCon fam
= pre_rough_split_tys
| otherwise
= let (match_tys1, match_tys2) = splitAtList tpl_tys match_tys
rough_tcs = roughMatchTcs match_tys1
in (rough_tcs, match_tys1, match_tys2)
(pre_match_tys1, pre_match_tys2) = splitAt (tyConArity fam) match_tys
pre_rough_split_tys
= (roughMatchTcs pre_match_tys1, pre_match_tys1, pre_match_tys2)
lookup_fam_inst_env -- The worker, local to this module
:: MatchFun
-> FamInstEnvs
-> TyCon -> [Type] -- What we are looking for
-> [FamInstMatch] -- Successful matches
-- Precondition: the tycon is saturated (or over-saturated)
lookup_fam_inst_env match_fun (pkg_ie, home_ie) fam tys
= lookup_fam_inst_env' match_fun home_ie fam tys
++ lookup_fam_inst_env' match_fun pkg_ie fam tys
{-
Note [Over-saturated matches]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's ok to look up an over-saturated type constructor. E.g.
type family F a :: * -> *
type instance F (a,b) = Either (a->b)
The type instance gives rise to a newtype TyCon (at a higher kind
which you can't do in Haskell!):
newtype FPair a b = FP (Either (a->b))
Then looking up (F (Int,Bool) Char) will return a FamInstMatch
(FPair, [Int,Bool,Char])
The "extra" type argument [Char] just stays on the end.
We handle data families and type families separately here:
* For type families, all instances of a type family must have the
same arity, so we can precompute the split between the match_tys
and the overflow tys. This is done in pre_rough_split_tys.
* For data family instances, though, we need to re-split for each
instance, because the breakdown might be different for each
instance. Why? Because of eta reduction; see
Note [Eta reduction for data families].
-}
-- checks if one LHS is dominated by a list of other branches
-- in other words, if an application would match the first LHS, it is guaranteed
-- to match at least one of the others. The RHSs are ignored.
-- This algorithm is conservative:
-- True -> the LHS is definitely covered by the others
-- False -> no information
-- It is currently (Oct 2012) used only for generating errors for
-- inaccessible branches. If these errors go unreported, no harm done.
-- This is defined here to avoid a dependency from CoAxiom to Unify
isDominatedBy :: CoAxBranch -> [CoAxBranch] -> Bool
isDominatedBy branch branches
= or $ map match branches
where
lhs = coAxBranchLHS branch
match (CoAxBranch { cab_lhs = tys })
= isJust $ tcMatchTys tys lhs
{-
************************************************************************
* *
Choosing an axiom application
* *
************************************************************************
The lookupFamInstEnv function does a nice job for *open* type families,
but we also need to handle closed ones when normalising a type:
-}
reduceTyFamApp_maybe :: FamInstEnvs
-> Role -- Desired role of result coercion
-> TyCon -> [Type]
-> Maybe (Coercion, Type)
-- Attempt to do a *one-step* reduction of a type-family application
-- but *not* newtypes
-- Works on type-synonym families always; data-families only if
-- the role we seek is representational
-- It does *not* normalise the type arguments first, so this may not
-- go as far as you want. If you want normalised type arguments,
-- use normaliseTcArgs first.
--
-- The TyCon can be oversaturated.
-- Works on both open and closed families
--
-- Always returns a *homogeneous* coercion -- type family reductions are always
-- homogeneous
reduceTyFamApp_maybe envs role tc tys
| Phantom <- role
= Nothing
| case role of
Representational -> isOpenFamilyTyCon tc
_ -> isOpenTypeFamilyTyCon tc
-- If we seek a representational coercion
-- (e.g. the call in topNormaliseType_maybe) then we can
-- unwrap data families as well as type-synonym families;
-- otherwise only type-synonym families
, FamInstMatch { fim_instance = FamInst { fi_axiom = ax }
, fim_tys = inst_tys
, fim_cos = inst_cos } : _ <- lookupFamInstEnv envs tc tys
-- NB: Allow multiple matches because of compatible overlap
= let co = mkUnbranchedAxInstCo role ax inst_tys inst_cos
ty = pSnd (coercionKind co)
in Just (co, ty)
| Just ax <- isClosedSynFamilyTyConWithAxiom_maybe tc
, Just (ind, inst_tys, inst_cos) <- chooseBranch ax tys
= let co = mkAxInstCo role ax ind inst_tys inst_cos
ty = pSnd (coercionKind co)
in Just (co, ty)
| Just ax <- isBuiltInSynFamTyCon_maybe tc
, Just (coax,ts,ty) <- sfMatchFam ax tys
= let co = mkAxiomRuleCo coax (zipWith mkReflCo (coaxrAsmpRoles coax) ts)
in Just (co, ty)
| otherwise
= Nothing
-- The axiom can be oversaturated. (Closed families only.)
chooseBranch :: CoAxiom Branched -> [Type]
-> Maybe (BranchIndex, [Type], [Coercion]) -- found match, with args
chooseBranch axiom tys
= do { let num_pats = coAxiomNumPats axiom
(target_tys, extra_tys) = splitAt num_pats tys
branches = coAxiomBranches axiom
; (ind, inst_tys, inst_cos)
<- findBranch (fromBranches branches) target_tys
; return ( ind, inst_tys `chkAppend` extra_tys, inst_cos ) }
-- The axiom must *not* be oversaturated
findBranch :: [CoAxBranch] -- branches to check
-> [Type] -- target types
-> Maybe (BranchIndex, [Type], [Coercion])
-- coercions relate requested types to returned axiom LHS at role N
findBranch branches target_tys
= go 0 branches
where
go ind (branch@(CoAxBranch { cab_tvs = tpl_tvs, cab_cvs = tpl_cvs
, cab_lhs = tpl_lhs
, cab_incomps = incomps }) : rest)
= let in_scope = mkInScopeSet (unionVarSets $
map (tyCoVarsOfTypes . coAxBranchLHS) incomps)
-- See Note [Flattening] below
flattened_target = flattenTys in_scope target_tys
in case tcMatchTys tpl_lhs target_tys of
Just subst -- matching worked. now, check for apartness.
| apartnessCheck flattened_target branch
-> -- matching worked & we're apart from all incompatible branches.
-- success
ASSERT( all (isJust . lookupCoVar subst) tpl_cvs )
Just (ind, substTyVars subst tpl_tvs, substCoVars subst tpl_cvs)
-- failure. keep looking
_ -> go (ind+1) rest
-- fail if no branches left
go _ [] = Nothing
-- | Do an apartness check, as described in the "Closed Type Families" paper
-- (POPL '14). This should be used when determining if an equation
-- ('CoAxBranch') of a closed type family can be used to reduce a certain target
-- type family application.
apartnessCheck :: [Type] -- ^ /flattened/ target arguments. Make sure
-- they're flattened! See Note [Flattening].
-- (NB: This "flat" is a different
-- "flat" than is used in TcFlatten.)
-> CoAxBranch -- ^ the candidate equation we wish to use
-- Precondition: this matches the target
-> Bool -- ^ True <=> equation can fire
apartnessCheck flattened_target (CoAxBranch { cab_incomps = incomps })
= all (isSurelyApart
. tcUnifyTysFG (const BindMe) flattened_target
. coAxBranchLHS) incomps
where
isSurelyApart SurelyApart = True
isSurelyApart _ = False
{-
************************************************************************
* *
Looking up a family instance
* *
************************************************************************
Note [Normalising types]
~~~~~~~~~~~~~~~~~~~~~~~~
The topNormaliseType function removes all occurrences of type families
and newtypes from the top-level structure of a type. normaliseTcApp does
the type family lookup and is fairly straightforward. normaliseType is
a little more involved.
The complication comes from the fact that a type family might be used in the
kind of a variable bound in a forall. We wish to remove this type family
application, but that means coming up with a fresh variable (with the new
kind). Thus, we need a substitution to be built up as we recur through the
type. However, an ordinary TCvSubst just won't do: when we hit a type variable
whose kind has changed during normalisation, we need both the new type
variable *and* the coercion. We could conjure up a new VarEnv with just this
property, but a usable substitution environment already exists:
LiftingContexts from the liftCoSubst family of functions, defined in Coercion.
A LiftingContext maps a type variable to a coercion and a coercion variable to
a pair of coercions. Let's ignore coercion variables for now. Because the
coercion a type variable maps to contains the destination type (via
coercionKind), we don't need to store that destination type separately. Thus,
a LiftingContext has what we need: a map from type variables to (Coercion,
Type) pairs.
We also benefit because we can piggyback on the liftCoSubstVarBndr function to
deal with binders. However, I had to modify that function to work with this
application. Thus, we now have liftCoSubstVarBndrCallback, which takes
a function used to process the kind of the binder. We don't wish
to lift the kind, but instead normalise it. So, we pass in a callback function
that processes the kind of the binder.
After that brilliant explanation of all this, I'm sure you've forgotten the
dangling reference to coercion variables. What do we do with those? Nothing at
all. The point of normalising types is to remove type family applications, but
there's no sense in removing these from coercions. We would just get back a
new coercion witnessing the equality between the same types as the original
coercion. Because coercions are irrelevant anyway, there is no point in doing
this. So, whenever we encounter a coercion, we just say that it won't change.
That's what the CoercionTy case is doing within normalise_type.
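As a small worked example (the family below is invented purely for
illustration; it is not taken from any real library):
  type family F a where
    F Int = Bool
Normalising the type (Maybe (F Int)) at role Nominal should yield a pair
(co, Maybe Bool) with co :: Maybe (F Int) ~N Maybe Bool: normalising the
argument reduces (F Int) to Bool via reduceTyFamApp_maybe, and the resulting
coercion is wrapped back under the Maybe constructor by mkTyConAppCo, exactly
as normalise_tc_args does below.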
-}
topNormaliseType :: FamInstEnvs -> Type -> Type
topNormaliseType env ty = case topNormaliseType_maybe env ty of
Just (_co, ty') -> ty'
Nothing -> ty
topNormaliseType_maybe :: FamInstEnvs -> Type -> Maybe (Coercion, Type)
-- ^ Get rid of *outermost* (or toplevel)
-- * type function redex
-- * data family redex
-- * newtypes
-- returning an appropriate Representational coercion. Specifically, if
-- topNormaliseType_maybe env ty = Maybe (co, ty')
-- then
-- (a) co :: ty ~R ty'
-- (b) ty' is not a newtype, and is not a type-family or data-family redex
--
-- However, ty' can be something like (Maybe (F ty)), where
-- (F ty) is a redex.
--
-- It's a bit like Type.repType, but handles type families too
topNormaliseType_maybe env ty
= topNormaliseTypeX_maybe stepper ty
where
stepper = unwrapNewTypeStepper `composeSteppers` tyFamStepper
    tyFamStepper rec_nts tc tys  -- Try to step a type/data family
= let (args_co, ntys) = normaliseTcArgs env Representational tc tys in
-- NB: It's OK to use normaliseTcArgs here instead of
-- normalise_tc_args (which takes the LiftingContext described
-- in Note [Normalising types]) because the reduceTyFamApp below
-- works only at top level. We'll never recur in this function
-- after reducing the kind of a bound tyvar.
case reduceTyFamApp_maybe env Representational tc ntys of
Just (co, rhs) -> NS_Step rec_nts rhs (args_co `mkTransCo` co)
_ -> NS_Done
---------------
normaliseTcApp :: FamInstEnvs -> Role -> TyCon -> [Type] -> (Coercion, Type)
-- See comments on normaliseType for the arguments of this function
normaliseTcApp env role tc tys
= initNormM env role (tyCoVarsOfTypes tys) $
normalise_tc_app tc tys
-- See Note [Normalising types] about the LiftingContext
normalise_tc_app :: TyCon -> [Type] -> NormM (Coercion, Type)
normalise_tc_app tc tys
= do { (args_co, ntys) <- normalise_tc_args tc tys
; case expandSynTyCon_maybe tc ntys of
{ Just (tenv, rhs, ntys') ->
do { (co2, ninst_rhs)
<- normalise_type (substTy (mkTvSubstPrs tenv) rhs)
; return $
if isReflCo co2
then (args_co, mkTyConApp tc ntys)
else (args_co `mkTransCo` co2, mkAppTys ninst_rhs ntys') }
; Nothing ->
do { env <- getEnv
; role <- getRole
; case reduceTyFamApp_maybe env role tc ntys of
Just (first_co, ty')
-> do { (rest_co,nty) <- normalise_type ty'
; return ( args_co `mkTransCo` first_co `mkTransCo` rest_co
, nty ) }
_ -> -- No unique matching family instance exists;
-- we do not do anything
return (args_co, mkTyConApp tc ntys) }}}
---------------
-- | Normalise arguments to a tycon
normaliseTcArgs :: FamInstEnvs -- ^ env't with family instances
-> Role -- ^ desired role of output coercion
-> TyCon -- ^ tc
-> [Type] -- ^ tys
-> (Coercion, [Type]) -- ^ co :: tc tys ~ tc new_tys
normaliseTcArgs env role tc tys
= initNormM env role (tyCoVarsOfTypes tys) $
normalise_tc_args tc tys
normalise_tc_args :: TyCon -> [Type] -- tc tys
-> NormM (Coercion, [Type]) -- (co, new_tys), where
-- co :: tc tys ~ tc new_tys
normalise_tc_args tc tys
= do { role <- getRole
; (cois, ntys) <- zipWithAndUnzipM normalise_type_role
tys (tyConRolesX role tc)
; return (mkTyConAppCo role tc cois, ntys) }
where
normalise_type_role ty r = withRole r $ normalise_type ty
---------------
normaliseType :: FamInstEnvs
-> Role -- desired role of coercion
-> Type -> (Coercion, Type)
normaliseType env role ty
= initNormM env role (tyCoVarsOfType ty) $ normalise_type ty
normalise_type :: Type -- old type
-> NormM (Coercion, Type) -- (coercion,new type), where
-- co :: old-type ~ new_type
-- Normalise the input type, by eliminating *all* type-function redexes
-- but *not* newtypes (which are visible to the programmer)
-- Returns with Refl if nothing happens
-- Does nothing to newtypes
-- The returned coercion *must* be *homogeneous*
-- See Note [Normalising types]
-- Try not to disturb type synonyms if possible
normalise_type
= go
where
go (TyConApp tc tys) = normalise_tc_app tc tys
go ty@(LitTy {}) = do { r <- getRole
; return (mkReflCo r ty, ty) }
go (AppTy ty1 ty2)
= do { (co, nty1) <- go ty1
; (arg, nty2) <- withRole Nominal $ go ty2
; return (mkAppCo co arg, mkAppTy nty1 nty2) }
go (ForAllTy (Anon ty1) ty2)
= do { (co1, nty1) <- go ty1
; (co2, nty2) <- go ty2
; r <- getRole
; return (mkFunCo r co1 co2, mkFunTy nty1 nty2) }
go (ForAllTy (Named tyvar vis) ty)
= do { (lc', tv', h, ki') <- normalise_tyvar_bndr tyvar
; (co, nty) <- withLC lc' $ normalise_type ty
; let tv2 = setTyVarKind tv' ki'
; return (mkForAllCo tv' h co, mkNamedForAllTy tv2 vis nty) }
go (TyVarTy tv) = normalise_tyvar tv
go (CastTy ty co)
= do { (nco, nty) <- go ty
; lc <- getLC
; let co' = substRightCo lc co
; return (castCoercionKind nco co co', mkCastTy nty co') }
go (CoercionTy co)
= do { lc <- getLC
; r <- getRole
; let right_co = substRightCo lc co
; return ( mkProofIrrelCo r
(liftCoSubst Nominal lc (coercionType co))
co right_co
, mkCoercionTy right_co ) }
normalise_tyvar :: TyVar -> NormM (Coercion, Type)
normalise_tyvar tv
= ASSERT( isTyVar tv )
do { lc <- getLC
; r <- getRole
; return $ case liftCoSubstTyVar lc r tv of
Just co -> (co, pSnd $ coercionKind co)
Nothing -> (mkReflCo r ty, ty) }
where ty = mkTyVarTy tv
normalise_tyvar_bndr :: TyVar -> NormM (LiftingContext, TyVar, Coercion, Kind)
normalise_tyvar_bndr tv
= do { lc1 <- getLC
; env <- getEnv
; let callback lc ki = runNormM (normalise_type ki) env lc Nominal
; return $ liftCoSubstVarBndrCallback callback lc1 tv }
-- | a monad for the normalisation functions, reading 'FamInstEnvs',
-- a 'LiftingContext', and a 'Role'.
newtype NormM a = NormM { runNormM ::
FamInstEnvs -> LiftingContext -> Role -> a }
initNormM :: FamInstEnvs -> Role
-> TyCoVarSet -- the in-scope variables
-> NormM a -> a
initNormM env role vars (NormM thing_inside)
= thing_inside env lc role
where
in_scope = mkInScopeSet vars
lc = emptyLiftingContext in_scope
getRole :: NormM Role
getRole = NormM (\ _ _ r -> r)
getLC :: NormM LiftingContext
getLC = NormM (\ _ lc _ -> lc)
getEnv :: NormM FamInstEnvs
getEnv = NormM (\ env _ _ -> env)
withRole :: Role -> NormM a -> NormM a
withRole r thing = NormM $ \ envs lc _old_r -> runNormM thing envs lc r
withLC :: LiftingContext -> NormM a -> NormM a
withLC lc thing = NormM $ \ envs _old_lc r -> runNormM thing envs lc r
instance Monad NormM where
ma >>= fmb = NormM $ \env lc r ->
let a = runNormM ma env lc r in
runNormM (fmb a) env lc r
instance Functor NormM where
fmap = liftM
instance Applicative NormM where
pure x = NormM $ \ _ _ _ -> x
(<*>) = ap
{-
************************************************************************
* *
Flattening
* *
************************************************************************
Note [Flattening]
~~~~~~~~~~~~~~~~~
As described in "Closed type families with overlapping equations"
http://research.microsoft.com/en-us/um/people/simonpj/papers/ext-f/axioms-extended.pdf
we need to flatten core types before unifying them, when checking for "surely-apart"
against earlier equations of a closed type family.
Flattening means replacing all top-level uses of type functions with
fresh variables, *taking care to preserve sharing*. That is, the type
(Either (F a b) (F a b)) should flatten to (Either c c), never (Either
c d).
Here is a nice example of why it's all necessary:
type family F a b where
F Int Bool = Char
F a b = Double
type family G a -- open, no instances
How do we reduce (F (G Float) (G Float))? The first equation clearly doesn't match,
while the second equation does. But, before reducing, we must make sure that the
target can never become (F Int Bool). Well, no matter what G Float becomes, it
certainly won't become *both* Int and Bool, so indeed we're safe reducing
(F (G Float) (G Float)) to Double.
This is necessary not only to get more reductions (which we might be
willing to give up on), but for substitutivity. If we have (F x x), we
can see that (F x x) can reduce to Double. So, it had better be the
case that (F blah blah) can reduce to Double, no matter what (blah)
is! Flattening as done below ensures this.
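To see what the flattener actually produces (an informal sketch; the variable
name is only illustrative): flattening the argument list [G Float, G Float]
from the example above yields something like [flt0, flt0], where flt0 is a
fresh variable standing for (G Float). Both occurrences map to the *same*
variable because coreFlattenTyFamApp memoises each family application in
fe_type_map, which is precisely the sharing property demanded above.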
flattenTys is defined here because of module dependencies.
-}
data FlattenEnv = FlattenEnv { fe_type_map :: TypeMap TyVar
, fe_subst :: TCvSubst }
emptyFlattenEnv :: InScopeSet -> FlattenEnv
emptyFlattenEnv in_scope
= FlattenEnv { fe_type_map = emptyTypeMap
, fe_subst = mkEmptyTCvSubst in_scope }
-- See Note [Flattening]
flattenTys :: InScopeSet -> [Type] -> [Type]
flattenTys in_scope tys = snd $ coreFlattenTys env tys
where
-- when we hit a type function, we replace it with a fresh variable
-- but, we need to make sure that this fresh variable isn't mentioned
-- *anywhere* in the types we're flattening, even if locally-bound in
-- a forall. That way, we can ensure consistency both within and outside
-- of that forall.
all_in_scope = in_scope `extendInScopeSetSet` allTyVarsInTys tys
env = emptyFlattenEnv all_in_scope
coreFlattenTys :: FlattenEnv -> [Type] -> (FlattenEnv, [Type])
coreFlattenTys = go []
where
go rtys env [] = (env, reverse rtys)
go rtys env (ty : tys)
= let (env', ty') = coreFlattenTy env ty in
go (ty' : rtys) env' tys
coreFlattenTy :: FlattenEnv -> Type -> (FlattenEnv, Type)
coreFlattenTy = go
where
go env ty | Just ty' <- coreView ty = go env ty'
go env (TyVarTy tv) = (env, substTyVar (fe_subst env) tv)
go env (AppTy ty1 ty2) = let (env1, ty1') = go env ty1
(env2, ty2') = go env1 ty2 in
(env2, AppTy ty1' ty2')
go env (TyConApp tc tys)
-- NB: Don't just check if isFamilyTyCon: this catches *data* families,
-- which are generative and thus can be preserved during flattening
| not (isGenerativeTyCon tc Nominal)
= let (env', tv) = coreFlattenTyFamApp env tc tys in
(env', mkTyVarTy tv)
| otherwise
= let (env', tys') = coreFlattenTys env tys in
(env', mkTyConApp tc tys')
go env (ForAllTy (Anon ty1) ty2) = let (env1, ty1') = go env ty1
(env2, ty2') = go env1 ty2 in
(env2, mkFunTy ty1' ty2')
go env (ForAllTy (Named tv vis) ty)
= let (env1, tv') = coreFlattenVarBndr env tv
(env2, ty') = go env1 ty in
(env2, mkNamedForAllTy tv' vis ty')
go env ty@(LitTy {}) = (env, ty)
go env (CastTy ty co) = let (env1, ty') = go env ty
(env2, co') = coreFlattenCo env1 co in
(env2, CastTy ty' co')
go env (CoercionTy co) = let (env', co') = coreFlattenCo env co in
(env', CoercionTy co')
-- when flattening, we don't care about the contents of coercions.
-- so, just return a fresh variable of the right (flattened) type
coreFlattenCo :: FlattenEnv -> Coercion -> (FlattenEnv, Coercion)
coreFlattenCo env co
= (env2, mkCoVarCo covar)
where
(env1, kind') = coreFlattenTy env (coercionType co)
fresh_name = mkFlattenFreshCoName
subst1 = fe_subst env1
in_scope = getTCvInScope subst1
covar = uniqAway in_scope (mkCoVar fresh_name kind')
env2 = env1 { fe_subst = subst1 `extendTCvInScope` covar }
coreFlattenVarBndr :: FlattenEnv -> TyVar -> (FlattenEnv, TyVar)
coreFlattenVarBndr env tv
| kind' `eqType` kind
= ( env { fe_subst = extendTvSubst old_subst tv (mkTyVarTy tv) }
-- override any previous binding for tv
, tv)
| otherwise
= let new_tv = uniqAway (getTCvInScope old_subst) (setTyVarKind tv kind')
new_subst = extendTvSubstWithClone old_subst tv new_tv
in
(env' { fe_subst = new_subst }, new_tv)
where
kind = tyVarKind tv
(env', kind') = coreFlattenTy env kind
old_subst = fe_subst env
coreFlattenTyFamApp :: FlattenEnv
-> TyCon -- type family tycon
-> [Type] -- args
-> (FlattenEnv, TyVar)
coreFlattenTyFamApp env fam_tc fam_args
= case lookupTypeMap type_map fam_ty of
Just tv -> (env, tv)
-- we need fresh variables here, but this is called far from
-- any good source of uniques. So, we just use the fam_tc's unique
-- and trust uniqAway to avoid clashes. Recall that the in_scope set
-- contains *all* tyvars, even locally bound ones elsewhere in the
-- overall type, so this really is fresh.
Nothing -> let tyvar_name = mkFlattenFreshTyName fam_tc
tv = uniqAway (getTCvInScope subst) $
mkTyVar tyvar_name (typeKind fam_ty)
env' = env { fe_type_map = extendTypeMap type_map fam_ty tv
, fe_subst = extendTCvInScope subst tv }
in (env', tv)
where fam_ty = mkTyConApp fam_tc fam_args
FlattenEnv { fe_type_map = type_map
, fe_subst = subst } = env
-- | Get the set of all type variables mentioned anywhere in the list
-- of types. These variables are not necessarily free.
allTyVarsInTys :: [Type] -> VarSet
allTyVarsInTys [] = emptyVarSet
allTyVarsInTys (ty:tys) = allTyVarsInTy ty `unionVarSet` allTyVarsInTys tys
-- | Get the set of all type variables mentioned anywhere in a type.
allTyVarsInTy :: Type -> VarSet
allTyVarsInTy = go
where
go (TyVarTy tv) = unitVarSet tv
go (AppTy ty1 ty2) = (go ty1) `unionVarSet` (go ty2)
go (TyConApp _ tys) = allTyVarsInTys tys
go (ForAllTy bndr ty) =
caseBinder bndr (\tv -> unitVarSet tv) (const emptyVarSet)
`unionVarSet` go (binderType bndr) `unionVarSet` go ty
-- don't remove the tv from the set!
go (LitTy {}) = emptyVarSet
go (CastTy ty co) = go ty `unionVarSet` go_co co
go (CoercionTy co) = go_co co
go_co (Refl _ ty) = go ty
go_co (TyConAppCo _ _ args) = go_cos args
go_co (AppCo co arg) = go_co co `unionVarSet` go_co arg
go_co (ForAllCo tv h co)
= unionVarSets [unitVarSet tv, go_co co, go_co h]
go_co (CoVarCo cv) = unitVarSet cv
go_co (AxiomInstCo _ _ cos) = go_cos cos
go_co (UnivCo p _ t1 t2) = go_prov p `unionVarSet` go t1 `unionVarSet` go t2
go_co (SymCo co) = go_co co
go_co (TransCo c1 c2) = go_co c1 `unionVarSet` go_co c2
go_co (NthCo _ co) = go_co co
go_co (LRCo _ co) = go_co co
go_co (InstCo co arg) = go_co co `unionVarSet` go_co arg
go_co (CoherenceCo c1 c2) = go_co c1 `unionVarSet` go_co c2
go_co (KindCo co) = go_co co
go_co (SubCo co) = go_co co
go_co (AxiomRuleCo _ cs) = go_cos cs
go_cos = foldr (unionVarSet . go_co) emptyVarSet
go_prov UnsafeCoerceProv = emptyVarSet
go_prov (PhantomProv co) = go_co co
go_prov (ProofIrrelProv co) = go_co co
go_prov (PluginProv _) = emptyVarSet
go_prov (HoleProv _) = emptyVarSet
mkFlattenFreshTyName :: Uniquable a => a -> Name
mkFlattenFreshTyName unq
= mkSysTvName (getUnique unq) (fsLit "flt")
mkFlattenFreshCoName :: Name
mkFlattenFreshCoName
= mkSystemVarName (deriveUnique eqPrimTyConKey 71) (fsLit "flc")
|
oldmanmike/ghc
|
compiler/types/FamInstEnv.hs
|
Haskell
|
bsd-3-clause
| 64,841
|
{-# LANGUAGE ExistentialQuantification #-}
-- |This module provides widgets to center other widgets horizontally
-- and vertically. These centering widgets relay focus and key events
-- to their children.
module Graphics.Vty.Widgets.Centering
( HCentered
, VCentered
, hCentered
, vCentered
, centered
)
where
import Graphics.Vty.Widgets.Core
import Graphics.Vty
import Graphics.Vty.Widgets.Util
data HCentered a = (Show a) => HCentered (Widget a)
instance Show (HCentered a) where
show (HCentered _) = "HCentered { ... }"
-- |Wrap another widget to center it horizontally.
hCentered :: (Show a) => Widget a -> IO (Widget (HCentered a))
hCentered ch = do
wRef <- newWidget (HCentered ch) $ \w ->
w { growHorizontal_ = const $ return True
, growVertical_ = \(HCentered child) -> growVertical child
, render_ = \this s ctx -> do
HCentered child <- getState this
img <- render child s ctx
let attr' = getNormalAttr ctx
(half, half') = centered_halves regionWidth s (imageWidth img)
return $ if half > 0
then horizCat [ charFill attr' ' ' half (imageHeight img)
, img
, charFill attr' ' ' half' (imageHeight img)
]
else img
, setCurrentPosition_ =
\this pos -> do
HCentered child <- getState this
s <- getCurrentSize this
chSz <- getCurrentSize child
let (half, _) = centered_halves regionWidth s (regionWidth chSz)
chPos = pos `plusWidth` half
setCurrentPosition child chPos
, getCursorPosition_ = \this -> do
HCentered child <- getState this
getCursorPosition child
}
wRef `relayKeyEvents` ch
wRef `relayFocusEvents` ch
return wRef
data VCentered a = (Show a) => VCentered (Widget a)
instance Show (VCentered a) where
show (VCentered _) = "VCentered { ... }"
-- |Wrap another widget to center it vertically.
vCentered :: (Show a) => Widget a -> IO (Widget (VCentered a))
vCentered ch = do
wRef <- newWidget (VCentered ch) $ \w ->
w { growVertical_ = const $ return True
, growHorizontal_ = const $ growHorizontal ch
, render_ = \this s ctx -> do
VCentered child <- getState this
img <- render child s ctx
let attr' = getNormalAttr ctx
(half, half') = centered_halves regionHeight s (imageHeight img)
return $ if half > 0
then vertCat [ charFill attr' ' ' (imageWidth img) half
, img
, charFill attr' ' ' (imageWidth img) half'
]
else img
, setCurrentPosition_ =
\this pos -> do
VCentered child <- getState this
s <- getCurrentSize this
chSz <- getCurrentSize child
let (half, _) = centered_halves regionHeight s (regionHeight chSz)
chPos = pos `plusHeight` half
setCurrentPosition child chPos
, getCursorPosition_ = \this -> do
VCentered child <- getState this
getCursorPosition child
}
wRef `relayKeyEvents` ch
wRef `relayFocusEvents` ch
return wRef
-- |Wrap another widget to center it both vertically and horizontally.
centered :: (Show a) => Widget a -> IO (Widget (VCentered (HCentered a)))
centered wRef = vCentered =<< hCentered wRef
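-- Split the unused space around an object into two padding amounts: the first
-- component is the padding placed before the object, the second the padding
-- placed after it, and when the leftover space is odd the second component
-- absorbs the extra cell.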
centered_halves :: (DisplayRegion -> Int) -> DisplayRegion -> Int -> (Int, Int)
centered_halves region_size s obj_sz =
let remaining = region_size s - obj_sz
half = remaining `div` 2
half' = if remaining `mod` 2 == 0
then half
else half + 1
in (half, half')
|
KommuSoft/vty-ui
|
src/Graphics/Vty/Widgets/Centering.hs
|
Haskell
|
bsd-3-clause
| 4,135
|
{-# LANGUAGE MagicHash #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE UnboxedTuples #-}
{-# OPTIONS_HADDOCK hide #-}
-- |
-- Module : Data.Array.Accelerate.Lifetime
-- Copyright : [2015] Robert Clifton-Everest, Manuel M T Chakravarty, Gabriele Keller
-- License : BSD3
--
-- Maintainer : Robert Clifton-Everest <robertce@cse.unsw.edu.au>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
module Data.Array.Accelerate.Lifetime (
Lifetime, newLifetime, withLifetime, addFinalizer, finalize, mkWeak,
mkWeakPtr, unsafeGetValue
) where
import Control.Applicative
import Data.Function ( on )
import Data.IORef ( mkWeakIORef, atomicModifyIORef' )
import Prelude
import GHC.Base ( touch#, IO(..))
import GHC.IORef ( IORef(.. ), newIORef )
import GHC.Prim ( mkWeak# )
import GHC.STRef ( STRef(..) )
import GHC.Weak ( Weak(..) )
-- | A lifetime represents a value with attached finalizers. This is similar to
-- the functionality provided by "System.Mem.Weak", but has the following
-- stronger properties:
--
-- * Unless explicitly forced, finalizers will not fire until after the
-- 'Lifetime' has become unreachable, where \"reachability\" is the same as
-- defined in "System.Mem.Weak". That is to say, there is no issue with
--   creating a 'Lifetime' for a non-primitive type and finalizers firing while
-- an object is still reachable.
--
-- * Finalizers are fired sequentially in reverse of the order in which they
-- were attached.
--
-- * As the finalizers are attached to the 'Lifetime' and not the underlying
-- value, there is no danger in storing it UNPACKED as part of another
-- structure.
--
data Lifetime a = Lifetime {-# UNPACK #-} !(IORef [IO ()]) {-# UNPACK #-} !(Weak (IORef [IO ()])) a
instance Eq a => Eq (Lifetime a) where
(==) = (==) `on` unsafeGetValue
-- | Construct a new 'Lifetime' from the given value.
--
newLifetime :: a -> IO (Lifetime a)
newLifetime a = do
ref <- newIORef []
weak <- mkWeakIORef ref (finalizer ref)
return $! Lifetime ref weak a
-- | This provides a way of looking at the value inside a 'Lifetime'. The
-- supplied function is executed immediately and the 'Lifetime' kept alive
-- throughout its execution. It is important to not let the value /leak/ outside
-- the function, either by returning it or by lazy IO.
--
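-- A minimal usage sketch (the names 'h' and 'useHandle' are illustrative
-- placeholders, not part of this module):
--
-- > do lt <- newLifetime h
-- >    withLifetime lt useHandle  -- 'lt' is kept alive for the whole call
--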
withLifetime :: Lifetime a -> (a -> IO b) -> IO b
withLifetime (Lifetime ref _ a) f = f a <* touch ref
-- | Attaches a finalizer to a 'Lifetime'. Like in "System.Mem.Weak", there is
-- no guarantee that the finalizers will eventually run. If they do run,
-- they will be executed in reverse of the order in which they were supplied,
-- matching the behaviour described on 'Lifetime' above.
--
addFinalizer :: Lifetime a -> IO () -> IO ()
addFinalizer (Lifetime ref _ _) f = atomicModifyIORef' ref (\fs -> (f:fs,()))
-- | Causes any finalizers associated with the given lifetime to be run
-- immediately on the calling thread.
--
-- Because the finalizer is run on the calling thread, care should be taken to
-- ensure that it does not try to acquire any locks the calling thread might
-- already possess. This can result in deadlock and is in contrast to calling
-- 'System.Mem.Weak.finalize' on 'System.Mem.Weak.Weak'.
--
finalize :: Lifetime a -> IO ()
finalize (Lifetime ref _ _) = finalizer ref
-- | Create a weak pointer from a 'Lifetime' to the supplied value.
--
-- Because weak pointers have their own concept of finalizers, it is important
-- to note these behaviours:
--
-- * Calling 'System.Mem.Weak.finalize' causes the finalizers attached to the
-- lifetime to be scheduled, and run in the correct order, but does not
-- guarantee they will execute on the calling thread.
--
-- * If 'deRefWeak' returns Nothing, there is no guarantee that the finalizers
-- have already run.
--
mkWeak :: Lifetime k -> v -> IO (Weak v)
mkWeak (Lifetime ref@(IORef (STRef r#)) _ _) v = IO $ \s ->
case mkWeak# r# v f s of (# s', w# #) -> (# s', Weak w# #)
where
f = finalizer ref
-- A specialised version of 'mkWeak' where the key and value are the same
-- 'Lifetime'.
--
-- > mkWeakPtr key = mkWeak key key
--
mkWeakPtr :: Lifetime a -> IO (Weak (Lifetime a))
mkWeakPtr l = mkWeak l l
-- | Retrieve the value from a lifetime. This is unsafe because, unless the
-- 'Lifetime' is still reachable, the finalizers may fire, potentially
-- invalidating the value.
--
unsafeGetValue :: Lifetime a -> a
unsafeGetValue (Lifetime _ _ a) = a
-- The actual finalizer for 'Lifetime's.
--
finalizer :: IORef [IO ()] -> IO ()
finalizer ref = do
fins <- atomicModifyIORef' ref ([],)
sequence_ fins
-- Touch an 'IORef', keeping it alive.
--
touch :: IORef a -> IO ()
touch r = IO $ \s -> case touch# r s of s' -> (# s', () #)
|
rrnewton/accelerate
|
Data/Array/Accelerate/Lifetime.hs
|
Haskell
|
bsd-3-clause
| 4,837
|
----------------------------------------------------------------------------
-- |
-- Module : Main
-- Copyright : (c) Spencer Janssen 2007
-- License : BSD3-style (see LICENSE)
--
-- Maintainer : sjanssen@cse.unl.edu
-- Stability : unstable
-- Portability : not portable, uses mtl, X11, posix
--
-- xmonad, a minimalist, tiling window manager for X11
--
-----------------------------------------------------------------------------
module Main (main) where
import XMonad
import Control.Monad (unless)
import System.Info
import System.Environment
import System.Posix.Process (executeFile)
import System.Exit (exitFailure)
import Paths_xmonad (version)
import Data.Version (showVersion)
import Graphics.X11.Xinerama (compiledWithXinerama)
-- | The entry point into xmonad. Attempts to compile any custom main
-- for xmonad, and if it doesn't find one, just launches the default.
main :: IO ()
main = do
installSignalHandlers -- important to ignore SIGCHLD to avoid zombies
args <- getArgs
let launch = catchIO buildLaunch >> xmonad def
case args of
[] -> launch
("--resume":_) -> launch
["--help"] -> usage
["--recompile"] -> recompile True >>= flip unless exitFailure
["--replace"] -> launch
["--restart"] -> sendRestart >> return ()
["--version"] -> putStrLn $ unwords shortVersion
["--verbose-version"] -> putStrLn . unwords $ shortVersion ++ longVersion
_ -> fail "unrecognized flags"
where
shortVersion = ["xmonad", showVersion version]
longVersion = [ "compiled by", compilerName, showVersion compilerVersion
, "for", arch ++ "-" ++ os
, "\nXinerama:", show compiledWithXinerama ]
usage :: IO ()
usage = do
self <- getProgName
putStr . unlines $
concat ["Usage: ", self, " [OPTION]"] :
"Options:" :
" --help Print this message" :
" --version Print the version number" :
" --recompile Recompile your ~/.xmonad/xmonad.hs" :
" --replace Replace the running window manager with xmonad" :
" --restart Request a running xmonad process to restart" :
[]
-- | Build "~\/.xmonad\/xmonad.hs" with ghc, then execute it. If there are no
-- errors, this function does not return. An exception is raised in any of
-- these cases:
--
-- * ghc missing
--
-- * both "~\/.xmonad\/xmonad.hs" and "~\/.xmonad\/xmonad-$arch-$os" missing
--
-- * xmonad.hs fails to compile
--
-- ** wrong ghc in path (fails to compile)
--
-- ** type error, syntax error, ..
--
-- * Missing XMonad\/XMonadContrib modules due to ghc upgrade
--
buildLaunch :: IO ()
buildLaunch = do
recompile False
dir <- getXMonadDir
args <- getArgs
executeFile (dir ++ "/xmonad-"++arch++"-"++os) False args Nothing
return ()
sendRestart :: IO ()
sendRestart = do
dpy <- openDisplay ""
rw <- rootWindow dpy $ defaultScreen dpy
xmonad_restart <- internAtom dpy "XMONAD_RESTART" False
allocaXEvent $ \e -> do
setEventType e clientMessage
setClientMessageEvent e rw xmonad_restart 32 0 currentTime
sendEvent dpy rw False structureNotifyMask e
sync dpy False
|
atupal/xmonad-mirror
|
xmonad/Main.hs
|
Haskell
|
mit
| 3,413
|
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE FlexibleContexts #-}
import Control.CP.FD.Example
-- diffList: the differences between successive elements of a list
diffList l = exists $ \d -> do -- request a (collection) variable d
let n = size l -- introduce n as alias for size l
size d @= n-1 -- size of d must be one less than l
loopall (0,n-2) $ \i -> do -- for each i in [0..n-2]
d!i @= abs (l!i - l!(i+1)) -- d[i] = abs(l[i]-l[i+1])
return d -- and return d to the caller
model :: ExampleModel ModelInt -- type signature
model n = -- function 'model' takes argument n
exists $ \x -> do -- request a (collection) variable x
size x @= n -- whose size must be n
d <- diffList x -- d becomes the diffList of x
x `allin` (cte 0,n-1) -- all x elements are in [0..n-1]
d `allin` (cte 1,n-1) -- all d elements are in [1..n-1]
allDiff x -- all x elements are different
allDiff d -- all d elements are different
x @!! 0 @< x @!! 1 -- some symmetry breaking
d @!! 0 @> d ! (n-2) -- some symmetry breaking
return x -- return the list itself
main = example_sat_main_single_expr model
|
neothemachine/monadiccp
|
examples/AllInterval.hs
|
Haskell
|
bsd-3-clause
| 1,386
|
{-# LANGUAGE RankNTypes #-}
module Toplevel where
import Data.Char(isAlpha,isDigit)
import Data.List(partition,(\\),nub,find,deleteBy,sort)
import Data.Map(Map,toList)
import System.IO
import Version(version,buildtime)
import Syntax
import ParserDef(getInt,pCommand,parseString,Command(..)
,program,parseHandle)
import LangEval(Env(..),env0,eval,elaborate,Prefix(..),mPatStrict,extendV)
import Monads(FIO(..),unFIO,runFIO,fixFIO,fio,resetNext
,write,writeln,readln,unTc,tryAndReport,fio,fioFailD
,errF,report,writeRef,handleP)
import Auxillary(plist,plistf,backspace,Loc(..),extendL,DispInfo,DispElem(..),eitherM)
import SCC(topSortR)
import Control.Monad(when)
import Infer(TcEnv(sourceFiles,tyfuns),completionEntry,lineEditReadln,initTcEnv
,mode0,modes,checkDecs,imports,addListToFM,appendFM2
,var_env,type_env,rules,runtime_env,syntaxExt)
import RankN(pprint,Z,failD,disp0,dispRef)
import Manual(makeManual)
import Commands
import SyntaxExt(synName,synKey)
import System.Environment(getArgs)
import Data.Time.Clock(UTCTime,getCurrentTime)
import System.IO(hClose)
import Control.Exception(try,IOException)
import System.FilePath(splitFileName)
import System.Directory(setCurrentDirectory,getDirectoryContents,getModificationTime)
-- import System.Console.Readline(setCompletionEntryFunction)
-- setCompletionEntryFunction :: Maybe (String -> IO [String]) -> IO ()
-------------------------------------------------------------
-- The programmer interface: the top level loop.
-- it performs the read-eval-typecheck-print loop.
-- It catches exceptions, and ties all the other pieces together.
----------------------------------------------
-- Perform one Read-Eval-Print action.
-- readEvalPrint :: [String] -> (TcEnv) -> FIO(TcEnv)
readEvalPrint commandTable sources tenv =
do { let tabExpandFun = completionEntry tenv
white c = elem c " \t\n"
; input <- lineEditReadln "prompt> " tabExpandFun
; z <- parseString pCommand input
; case z of
Left s -> do {writeln s; return (tenv) }
Right(x,rest) | all white rest ->
case x of
(ColonCom com str) -> dispatchColon commandTable tenv com str
(ExecCom e) -> execExp tenv e
(DrawCom p e) -> drawPatExp tenv p e
(LetCom d) -> letDec elabDs tenv d
(EmptyCom) -> return tenv
Right(x,rest) -> fail ("\nI parsed the command:\n "++show x++
"\nBut there was some trailing text: "++rest)
}
-- Repeat Read-Eval-Print until the :q command is given
topLoop commandTable sources env = tryAndReport
(do { fio(hFlush stdout)
; fio(writeRef dispRef disp0)
; env' <- (readEvalPrint commandTable sources init)
; topLoop commandTable (sourceFiles env') env'
}) (report (topLoop commandTable (sourceFiles init) init))
where init = (env{sourceFiles=sources})
------------------------------------------------------------------
-- Commands for load files, then going into the toplevel loop
------------------------------------------------------------------
-- load just the prelude and then go into the toplevel loop
main :: IO ()
main = runFIO(do { let sources = ["LangPrelude.prg"]
; writeln ("Loading source files = "++show sources)
; fio $ hSetBuffering stdout NoBuffering
; fio $ hSetBuffering stdin NoBuffering
; (env1,time) <-
tryAndReport (elabFile 0 "LangPrelude.prg" initTcEnv)
(report (return(initTcEnv,undefined)))
; let sources2 = sourceFiles env1
; topLoop (commandF (elabFile 0)) sources2 env1
; return () }) errF
-- Load the prelude and then load the file "s", and then go into the toplevel loop.
go :: String -> IO ()
go s =
runFIO(do { writeln (version++" -- Type ':?' for command line help."++"\n\n")
; let sources = ["LangPrelude.prg",s]
; writeln ("Loading source files = "++show sources)
; writeln "loading the prelude (LangPrelude.prg)"
; (env,time) <- tryAndReport (elabFile 0 "LangPrelude.prg" initTcEnv)
(report (return (initTcEnv,undefined)))
; (env2,time2) <- elabFile 0 s env
; let sources2 = sourceFiles env2
; topLoop (commandF (elabFile 0)) sources2 env2
; return () }) errF
-- Don't load the prelude, just load "s" then go into the toplevel loop.
run :: String -> IO ()
run s = runFIO(do { let (dir,name) = splitFileName s
; fio (when (not (null dir)) (setCurrentDirectory dir))
; writeRef modes mode0
; writeln ("Loading source files = "++show [s])
; let init = (initTcEnv{sourceFiles = [s]})
; (env1,time) <- tryAndReport (elabFile 0 s init)
(report (return (init,undefined)))
; let sources2 = sourceFiles env1
; topLoop (commandF (elabFile 0)) sources2 env1
; return () }) errF
-- Try to load a file, if it fails for any reason, exit the program
-- with an unrecoverable error. Used in testing, where failure means
-- a major error, something very bad (and unexpected), has happened
try_to_load s =
runFIO(do { writeln ("loading "++s)
; (env1,time) <- tryAndReport (elabFile 0 s initTcEnv) err2
; writeln (s++" successfully loaded")
; return () }) errF
where err2 loc mess = error ("At "++show loc++"\n"++mess)
-- Get the file to "run" from the command line arguments, then "run" it
omega :: IO()
omega =
do { args <- getArgs
; putStr (version++"\n")
; putStr ("Build Date: "++buildtime++"\n\n")
; putStr "Type ':?' for command line help.\n"
; case args of
[] -> run "LangPrelude.prg"
("-tests" : dir : _) -> alltests dir
("-manual" : dir : _) -> makeManual dir
(file : _) -> run file
}
-------------------------------------------------------------------------------
-- elabDs is the interface to everything. Elaborates a mutually recursive [Dec]
-- other functions read the [Dec] from files and call this function
elabDs :: [Dec] -> TcEnv -> FIO TcEnv
elabDs ds (tenv) =
do { let nam (Global s) = s
-- ; write ((display (map nam (concat (map decname ds))))++" ")
; (tenv1,ds1,cs1) <- checkDecs tenv ds -- type check the list
--; mapM (writeln .show) ds
--; mapM (writeln . show) ds1
; when (not (null cs1))
(fioFailD 3 disp0 [Ds "Unsolved constraints (type 2): ",Ds (show cs1)])
; env1 <- elaborate None ds1 (runtime_env tenv) -- evaluate the list
; return(tenv1 { runtime_env = env1 })
}
display [s] = s
display ss = plistf id "(" ss " " ")"
------------------------------------------------------------------
-- Get a [Dec] from a file name
parseDecs :: String -> FIO [Dec]
parseDecs file =
do { hndl <- eitherM (fio (try $ openFile file ReadMode))
(\ err -> fail ("\nProblem opening file: "++file++" ("++show (err :: IOException)++")"))
return
; let err mess = fio (hClose hndl >> fail mess) -- if parsing fails, we should close the file
; x <- handleP (const True) 10
(fio $ parseHandle program file hndl) err
; fio (hClose hndl)
; case x of
Left s -> fail s
Right (Program ds) -> return ds
}
-------------------------------------------------------------------------
-- Omega has a very simple importing mechanism. A user writes:
-- import "xx.prg" (f,g,T)
-- to import the file named "xx.prg", all symbols with names "f", "g", "T"
-- (no matter what namespace they appear in) are imported into the
-- current environment. Usually "xx.prg" is a complete path as Omega's
-- notion of current directory is quite primitive.
-- import "xx.prg" means import everything from "xx.prg"
importP (Import s vs) = True
importP _ = False
importName (Import s vs) = s
------------------------------------------------------------
-- Read a [Dec] from a file, then split it into imports and
-- binding groups, uses elabDs to do the work.
indent n = replicate ((n-1)*3) ' '
nameOf (name,time,deps,env) = name
elabFile :: Int -> String -> TcEnv -> FIO(TcEnv, UTCTime)
elabFile count file (tenv) =
do { time <- fio getCurrentTime
; all <- parseDecs file
; let (importL,ds) = partition importP all
(dss,pairs) = topSortR freeOfDec ds
; writeln(indent count++"->Loading import "++basename file)
; (tenv2,importList) <- importManyFiles (count + 1) importL tenv
--; mapM (writeln . (++"\n"). show) ds
--; writeln ("\nelaborating "++file++"\n"++show(map freeOfDec ds)++"\n pairs\n"++show pairs)
-- Check for multiple definitions in the file
; multDef ds (concat (map fst pairs))
-- Check if any names are already declared
; mapM (notDup tenv2 file) (foldr (\ (exs,deps) ss -> exs++ss) [] pairs)
; tenv3 <- foldF elabDs (tenv2) dss
; let tenv4 = adjustImports file time importList tenv3
; writeln ((indent (count+1))++"<-"++file++" loaded.")
; return (tenv4,time)
}
adjustImports name time deps new = new2
where -- a little recursive knot tying so the env being defined (new2) is
-- also stored in the imports list of the function being added
new2 = new {imports = m : (filter pred (imports new))}
m = (name,time,deps,new2)
keepers = map fst deps
pred (nm1,time1,deps1,env1) = elem nm1 keepers
addI [] old = old
addI ((m@(nm,time,deps,env)):more) old = (nm,time,deps,env): (addI more (deleteBy same m old))
where same (nm1,time1,deps1,env1) (nm2,time2,deps2,env2) = nm1 == nm2
lookupDeps nm env = case find match (imports env) of
                      Nothing -> fail ("Unknown module when looking up dependency list: "++nm)
                      Just(nm,time,deps,env) -> return deps
where match (name,time,deps,env) = name==nm
showimp message env = message++plistf nameOf "(" (imports env) "," ")."
importManyFiles:: Int -> [Dec] -> TcEnv -> FIO (TcEnv, [(String, UTCTime)])
importManyFiles count [] tenv = return (tenv,[])
importManyFiles count (d:ds) tenv =
do { (next,name,time) <- importFile count d tenv
; (next2,ts) <- importManyFiles count ds next
; return(next2,(name,time):ts) }
importFile :: Int -> Dec -> TcEnv -> FIO(TcEnv,String,UTCTime)
importFile count (Import name vs) tenv =
case find (\(nm,time,deps,env)->name==nm) (imports tenv) of
Just (nm,time,deps,env) ->
do { writeln (indent count++"Import "++name++" already loaded.")
; return (importNames nm vs env tenv,nm,time) }
Nothing -> do { (new,time) <- elabFile count name tenv
; deps <- lookupDeps name new
; unknownExt vs (syntaxExt new)
; let new2 = adjustImports name time deps new
; return(importNames name vs new2 tenv,name,time) }
importNames :: String -> Maybe [ImportItem] -> TcEnv -> TcEnv -> TcEnv
importNames name items new old =
old { imports = addI (imports new) (imports old)
, var_env = addListToFM (var_env old) (filter okToAddVar (toList (var_env new)))
, type_env = filter q (type_env new) ++ type_env old
, runtime_env = add (runtime_env new) (runtime_env old)
, rules = appendFM2 (rules old) (filter p2 (toList (rules new)))
, syntaxExt = addSyntax syntax (syntaxExt new) (syntaxExt old)
, tyfuns = filter okToAddTyFun (tyfuns new) ++ tyfuns old
}
where elemOf x Nothing = True -- No import list, so everything is imported
elemOf x (Just vs) = elem x vs -- is it in the import list?
okToAddVar :: forall a . (Var, a) -> Bool
okToAddVar (x,y) = elemOf x vs
okToAddTyFun (x,y) = elemOf (Global x) vs
p2 (s,y) = elemOf (Global s) vs
q (str,tau,polyk) = elemOf (Global str) vs
add (Ev xs) (Ev ys) = Ev (filter okToAddVar xs ++ ys)
accV (VarImport v) vs = v:vs -- used to fold over the runtime environment
accV _ vs = vs
accSyn (SyntaxImport nm tag) vs = (nm,tag):vs -- fold over syntax imports
accSyn _ vs = vs
(vs,syntax) = case items of
Just zs -> (Just(foldr accV [] zs),Just(foldr accSyn [] zs))
Nothing -> (Nothing,Nothing)
addSyntax Nothing new old = new ++ old
addSyntax (Just imports) new old = foldr acc old new
where acc ext old = if (synName ext,synKey ext) `elem` imports
then ext:old else old
unknownExt Nothing new = return ()
unknownExt (Just []) new = return ()
unknownExt (Just(VarImport x : xs)) new = unknownExt (Just xs) new
unknownExt (Just(SyntaxImport nm tag : xs)) new =
if any good new
then unknownExt (Just xs) new
else fail ("\nImporting unknown extension: "++nm++"("++tag++")")
where good ext = synName ext == nm && synKey ext == tag
multDef :: [Dec] -> [Var] -> FIO ()
multDef ds names = if null dups then return () else fail (foldr report "" dups)
where dups = nub(names \\ nub names)
locs = concat(map decloc ds)
report :: Var -> String -> String
report nm s = show nm ++ " is multiply defined at lines "++show (foldr acc [] locs)++"\n"++s
where acc (name,SrcLoc _ line col) ls = if nm==name then line:ls else ls
acc (name,Z) ls = ls
-----------------------------------------------------
-- This command is for the maintainers of Omega; it tries
-- to load all the files in the TestPrograms directory with
-- extension ".prg". It is used to exercise Omega.
alltests dir =
do { setCurrentDirectory dir
; files' <- getDirectoryContents "."
; let ok x = case reverse x of { ('g':'r':'p':'.':_) -> True; _ -> False}
; let files = sort files'
; print (filter ok files)
; mapM try_to_load (filter ok files)
; setCurrentDirectory ".."
}
-------------------------------------------------------------------------------
------------------------------------------------------------------
-- Some shortcuts to running the interpreter
work = run "work.prg"
ky = run "D:/IntelWork/Kyung2.prg"
bad = run "D:/work/sheard/research/omega/badPrograms/shaped.prg"
qq = run "d:/LogicBlox/Code/LogicMetaGenerator/Text/meaning.prg"
add = run "D:/IntelWork/adder.prg"
temp = run "D:/IntelWork/temp.prg"
circ = run "Examples/RecursiveCircuit.prg"
parse = run "Examples/Parser.prg"
tests = go "tests.prg"
tm = go "toMetaMl.prg"
q s = go ("C:/tmp/OmegaExamples/"++s++".prg")
|
cartazio/omega
|
src/Toplevel.hs
|
Haskell
|
bsd-3-clause
| 14,847
|
module Get.Init where
import System.IO (hFlush, stdout)
import Data.Aeson.Encode.Pretty (encodePretty)
import qualified Elm.Package.Name as N
import qualified Elm.Package.Version as V
import qualified Elm.Package.Description as D
import qualified Elm.Package.Paths as Path
import qualified Data.ByteString.Lazy as BS
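-- Prompt with the given question and keep re-asking until the supplied
-- validator accepts the answer, printing the validator's error message after
-- each rejected attempt.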
askForChecked :: (String -> Either String a) -> String -> IO a
askForChecked check request = do
putStr $ request ++ " "
hFlush stdout
answer <- getLine
case check answer of
Right result -> return result
Left message -> do putStrLn message
askForChecked check request
eitherFromMaybe :: a -> Maybe b -> Either a b
eitherFromMaybe def val = case val of
Just r -> Right r
Nothing -> Left def
injectDefault :: Maybe [a] -> [a] -> [a]
injectDefault (Just xs) [] = xs
injectDefault _ ys = ys
askForVersion :: Maybe String -> String -> IO V.Version
askForVersion def req = askForChecked check req
where check = (eitherFromMaybe "Wrong version format!" . V.fromString . injectDefault def)
askFor :: String -> IO String
askFor req = askForChecked Right req
askForWithDefault :: String -> String -> IO String
askForWithDefault def req = askForChecked (Right . injectDefault (Just def)) req
askForLimited :: String -> Int -> String -> IO String
askForLimited name limit req = askForChecked check req
where check str = if length str > limit
then Left errorMessage
else Right str
errorMessage = concat [ name
, " length shouldn't exceed "
, show limit
, " characters!"]
readDeps :: IO D.Description
readDeps = do
projectName <- askFor "Project name:"
userName <- askFor "Github user name:"
version <- askForVersion (Just "0.1.0") "Initial version? [default: 0.1.0]"
summary <- askForLimited "Summary" 80 "Summary:"
description <- askFor "Description:"
license <- askForWithDefault "BSD3" "License? [default: BSD3]"
repo <- askFor "Repository address?"
elmVersion <- askForVersion Nothing "Elm version?"
return $ D.Description (N.Name userName projectName) version summary description license repo [] [] elmVersion [] []
initialize :: IO ()
initialize = do
dependencies <- readDeps
BS.writeFile Path.description (encodePretty dependencies)
|
laszlopandy/elm-package
|
src/Get/Init.hs
|
Haskell
|
bsd-3-clause
| 2,361
|
module ShouldCompile where
(<>) :: (a -> Maybe b) -> (b -> Maybe c) -> (a -> Maybe c)
(m1 <> m2) a1 = case m1 a1 of
Nothing -> Nothing
Just a2 -> m2 a2
|
ezyang/ghc
|
testsuite/tests/parser/should_compile/read026.hs
|
Haskell
|
bsd-3-clause
| 209
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
module TH_RichKinds2 where
import Data.Char
import Data.List
import Language.Haskell.TH
$(return [OpenTypeFamilyD (TypeFamilyHead
(mkName "Map") [KindedTV (mkName "f")
(AppT (AppT ArrowT (VarT (mkName "k1")))
(VarT (mkName "k2"))),
KindedTV (mkName "l")
(AppT ListT
(VarT (mkName "k1")))]
(KindSig (AppT ListT (VarT (mkName "k2")))) Nothing)])
$( let fixKs :: String -> String -- need to remove TH renaming index from k variables
fixKs s =
case (elemIndex 'k' s) of
Nothing -> s
Just i ->
if i == (length s) || (s !! (i+1) /= '_') then s else
let (prefix, suffix) = splitAt (i+2) s -- the +2 for the "k_"
(index, rest) = span isDigit suffix in
if length index == 0 then s else
prefix ++ "0" ++ (fixKs rest)
in
do decls <- [d| data SMaybe :: (k -> *) -> (Maybe k) -> * where
SNothing :: SMaybe s 'Nothing
SJust :: s a -> SMaybe s ('Just a)
type instance Map f '[] = '[]
type instance Map f (h ': t) = ((f h) ': (Map f t))
|]
reportWarning (fixKs (pprint decls))
return decls )
data SBool :: Bool -> * where
SFalse :: SBool 'False
STrue :: SBool 'True
mbool :: SMaybe SBool ('Just 'False)
mbool = SJust SFalse
|
ezyang/ghc
|
testsuite/tests/th/TH_RichKinds2.hs
|
Haskell
|
bsd-3-clause
| 1,698
|
{-# LANGUAGE TypeFamilies #-}
module B where
class Entity v where
data Fields v
instance Show (Fields v) where show = undefined
|
wxwxwwxxx/ghc
|
testsuite/tests/driver/T5147/B1.hs
|
Haskell
|
bsd-3-clause
| 134
|
module ShouldFail where
data Foo = MkFoo Bool
instance Eq Foo where
(MkFoo x) == (MkFoo y) = x == y
instance Eq Foo where
-- forgot to type "Ord" above
(MkFoo x) <= (MkFoo y) = x <= y
|
urbanslug/ghc
|
testsuite/tests/typecheck/should_fail/tcfail056.hs
|
Haskell
|
bsd-3-clause
| 200
|
{-# LANGUAGE OverloadedStrings #-}
module Store.SQL.Util.Pivots where
{-- Pivot tables ---------------------------------------------------------------
Last week you were able to scan the database, extract rows of name(s), parse
the names, then store them as parsed entities in the database connected to the
source article via a join-table. These join tables, also known as 'pivot
tables,' are prevalent and follow a certain structure. Instead of the specific
ArticlePersonJoin structure, we'll declare and use the general Pivot type here.
--}
import Control.Arrow (second)
import Control.Monad.State
import Data.Aeson
import qualified Data.Map as Map
import qualified Data.Set as Set
import Database.PostgreSQL.Simple
import Database.PostgreSQL.Simple.SqlQQ
import Database.PostgreSQL.Simple.ToRow
import Database.PostgreSQL.Simple.FromRow
import Database.PostgreSQL.Simple.ToField
-- below import available via 1HaskellADay git repository
import Data.MemoizingTable (MemoizingState, MemoizingTable(MT))
import qualified Data.MemoizingTable as MT
import Store.SQL.Util.Indexed
data Pivot = Pvt { srcIx, trgId :: Integer }
deriving (Eq, Ord, Show)
-- creates a Pivot value from source and destination table indices
joinValue :: Indexed i => i -> Index -> Pivot
joinValue i j = Pvt (idx i) (idx j)
-- and a pivot as a pair:
toTup :: Pivot -> (Integer, Integer)
toTup (Pvt a b) = (a, b)
-- and now we need a pivot-inserter
instance ToRow Pivot where
toRow (Pvt i j) = map toField [i,j]
-- and to insert the pivots is simply using the pivot table insert statement,
-- in this case insertArtPersJoinStmt, with the inserter function.
{-- e.g.:
>>> inserter conn insertArtPersJoinStmt (zipWith joinValue pers ixpers)
--}
instance FromRow Pivot where
fromRow = Pvt <$> field <*> field
instance ToJSON Pivot where
toJSON (Pvt k1 k2) = object ["article_id" .= k1, "subject_id" .= k2]
{--
This happens frequently: we need to tie together a symbol table to the primary
table. The symbol table is built in a MemoizingTable
--}
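-- Read the memoizing table and the pending (source id -> values) map from the
-- state, and emit one 'Pivot' per (source id, resolved value id) pair.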
buildPivots :: Ord val => Monad m => MemoizingState m [Pivot] Integer val
buildPivots = get >>= \(MT _ keys _, joins) ->
return (map (uncurry Pvt)
(concatMap (sequence . (second (map (keys Map.!))))
(Map.toList joins)))
{--
Keywords, Authors, and sections. All three follow the same pattern:
* fetch previously stored values
* memoize
* materialize values from the article set, see which values are new
* store new values, relate all values.
If we are following the same pattern, is there a generalization of the
functions for all of this, so we can have one function that does the work
for any memoize-y type? What would this function look like?
--}
memoizeStore :: ToRow a => Ord a =>
Connection
-> (Connection -> IO [IxValue a])
-> (Connection -> [a] -> IO [Index])
-> Query
-> (article -> [a])
-> [IxValue article]
-> IO ()
memoizeStore conn fetcher storer pivotQuery getter ixarts =
-- so, 1. we need to fetch currently-stored values and start the MemoizingTable
fetcher conn >>= \ixvals ->
let memtable = MT.start (map ix2tup ixvals)
(ids,arts) = unzip (map ix2tup ixarts)
stat = execState (zipWithM_ MT.triageM ids (map getter arts))
(memtable,Map.empty)
substate = Set.toList (MT.newValues (fst stat))
in storer conn substate >>= \ixnewvals ->
let table = MT.update (zip (map idx ixnewvals) substate) (fst stat)
in void (executeMany conn pivotQuery
(evalState buildPivots (table, snd stat)))
|
geophf/1HaskellADay
|
exercises/HAD/Store/SQL/Util/Pivots.hs
|
Haskell
|
mit
| 3,684
|
{-|
Module : Types
Description : Examples of operations on different types in Haskell
Copyright : (c) Fabrício Olivetti, 2017
License : GPL-3
Maintainer : fabricio.olivetti@gmail.com
A sample of operations with different types.
-}
module Main where
-- |'soma' sums two integer values
soma :: Integer -> Integer -> Integer
soma x y = x + y
-- |'aurea' defines the golden number
aurea = (1 + sqrt 5) / 2.0
-- |'bissexto' returns whether 'ano' is a leap year (ugly version)
bissexto ano = (ano `rem` 400 == 0) || ((ano `rem` 4 == 0) && (ano `rem` 100 /= 0))
-- |'main' executes the main program
main :: IO ()
main = do
print (soma 1 3)
print aurea
print (bissexto 2018)
|
folivetti/BIGDATA
|
02 - Básico/Types.hs
|
Haskell
|
mit
| 685
|
{-# LANGUAGE OverloadedStrings #-}
module Compiler.Types where
import Constants
import Control.Monad (mzero)
import CoreTypes
import Data.Aeson (FromJSON (..), ToJSON (..), Value (..), object,
(.:), (.=))
import Language.Types
import Monad
import Types
data Deployment = Put
{ deployment_srcs :: [FilePath]
, deployment_dst :: FilePath
, deployment_kind :: DeploymentKind
} deriving Eq
instance Show Deployment where
show dep = unwords $ srcs ++ [k,dst]
where
srcs = map quote $ deployment_srcs dep
k = case deployment_kind dep of
LinkDeployment -> linkKindSymbol
CopyDeployment -> copyKindSymbol
dst = quote $ deployment_dst dep
quote = (\s -> "\"" ++ s ++ "\"")
instance FromJSON Deployment where
parseJSON (Object o)
= Put <$> o .: "sources"
<*> o .: "destination"
<*> o .: "deployment kind"
parseJSON _ = mzero
instance ToJSON Deployment where
toJSON depl
= object
[ "sources" .= deployment_srcs depl
, "destination" .= deployment_dst depl
, "deployment kind" .= deployment_kind depl
]
type CompilerPrefix = [PrefixPart]
data PrefixPart
= Literal String
| Alts [String]
deriving (Show, Eq)
data CompilerState = CompilerState
{ state_deployment_kind_override :: Maybe DeploymentKind
, state_into :: Directory
, state_outof_prefix :: CompilerPrefix
} deriving (Show, Eq)
type PrecompileError = String
type ImpureCompiler = ExceptT CompileError (ReaderT SparkConfig IO)
type PureCompiler = ExceptT CompileError (ReaderT SparkConfig Identity)
type Precompiler = WriterT [PrecompileError] Identity
type InternalCompiler = StateT CompilerState (WriterT ([Deployment], [CardReference]) PureCompiler)
|
badi/super-user-spark
|
src/Compiler/Types.hs
|
Haskell
|
mit
| 1,955
|
-- Traits type class
-- ref: https://wiki.haskell.org/Traits_type_class
-- ref: https://wiki.haskell.org/Reified_type
-- blog: http://augustss.blogspot.nl/2007/04/overloading-haskell-numbers-part-3.html
-- Occasionally you want to associate information with a type, not just a value. An example is the standard Bounded class:
class Bounded a where
minBound :: a
maxBound :: a
-- However, this technique does not work if the information which you want to extract doesn't have the type in its signature. One example is floating point format information, such as:
class FloatTraits a where
-- This one is fine
epsilon :: a
-- This one is not
mantissaDigits :: Int
-- etc
-- The problem is that there is simply no way to tell Haskell which version of mantissaDigits you want, because the Type parameter a does not appear in its type signature anywhere.
-- The solution is to pass a Reified type as a phantom argument:
class FloatTraits a where
mantissaDigits :: a -> Int
instance FloatTraits Float where
mantissaDigits _ = 24
instance FloatTraits Double where
mantissaDigits _ = 53
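-- With these instances in place you select the trait you want by passing a
-- phantom value of the appropriate type; 'undefined' works because the
-- argument is never inspected (the results shown are simply the figures from
-- the instances above):
--
-- mantissaDigits (undefined :: Float) -- 24
-- mantissaDigits (undefined :: Double) -- 53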
-- This technique works well in conjunction with Functional dependencies. For example, there may be some float types for which an Int may not be sufficient to express the number of digits in the mantissa:
class (Integral i) => FloatTraits a i | a -> i where
mantissaDigits :: a -> i
instance FloatTraits Float Int where
mantissaDigits _ = 24
instance FloatTraits ArbitraryPrecisionFloat Integer where
mantissaDigits x = {- detail omitted -}
-- You can also use this technique as an alternative to Higher order functions in cases where you need several functions which work together.
-- Consider, for example, converting strings from upper case to lower case and back again. This in general depends on the language that you are operating in. The lower case version of 'A', for example, is different in English than in Greek. You can wrap this up in a typeclass parameterised on language:
class CaseConvert language where
toUpperCase :: language -> String -> String
toLowerCase :: language -> String -> String
data EnglishLanguage = EnglishLanguage
instance CaseConvert EnglishLanguage where
toUpperCase _ s = {- etc -}
toLowerCase _ s = {- etc -}
data GreekLanguage = GreekLanguage
instance CaseConvert GreekLanguage where
toUpperCase _ s = {- etc -}
toLowerCase _ s = {- etc -}
-- Reified type
{-
To "reify" something is to take something that is abstract and regard it as material. A classic example is the way that the ancients took abstract concepts (e.g. "victory") and turned them into deities (e.g. Nike, the Greek goddess of victory).
A reified type is a value that represents a type. Using reified types instead of real types means that you can do any manipulations with them that you can do with values.
In Haskell, the value undefined is a member of every (boxed) type, so that is often a good value to use to represent a type, assuming you don't need to break it apart.
-}
|
Airtnp/Freshman_Simple_Haskell_Lib
|
Idioms/Traits-type-class.hs
|
Haskell
|
mit
| 3,073
|
module Debug.Debug (
DebugShow,
debug_show,
) where
class DebugShow a where
debug_show :: a -> String
|
quintenpalmer/fresh
|
haskell/src/Debug/Debug.hs
|
Haskell
|
mit
| 115
|
-- Convert string to camel case
-- http://www.codewars.com/kata/517abf86da9663f1d2000003/
module CamelCase where
import Data.Char (toUpper)
import Data.List.Split (split, dropDelims, oneOf)
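-- For example, toCamelCase "the-stealth_warrior" evaluates to
-- "theStealthWarrior": the first chunk is kept unchanged and every later chunk
-- has its first character upper-cased.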
toCamelCase :: String -> String
toCamelCase str = f . split (dropDelims $ oneOf "-_") $ str
where f [] = []
f [x] = x
f (x:xs) = x ++ concatMap g xs
g [] = []
g (x:xs) = toUpper x : xs
|
gafiatulin/codewars
|
src/5 kyu/CamelCase.hs
|
Haskell
|
mit
| 422
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE ViewPatterns #-}
module Hpack.Render.Hints (
FormattingHints (..)
, sniffFormattingHints
#ifdef TEST
, extractFieldOrder
, extractSectionsFieldOrder
, sanitize
, unindent
, sniffAlignment
, splitField
, sniffIndentation
, sniffCommaStyle
#endif
) where
import Data.Char
import Data.Maybe
import Data.List
import Control.Applicative
import Hpack.Render.Dsl
data FormattingHints = FormattingHints {
formattingHintsFieldOrder :: [String]
, formattingHintsSectionsFieldOrder :: [(String, [String])]
, formattingHintsAlignment :: Maybe Alignment
, formattingHintsRenderSettings :: RenderSettings
} deriving (Eq, Show)
sniffFormattingHints :: [String] -> FormattingHints
sniffFormattingHints (sanitize -> input) = FormattingHints {
formattingHintsFieldOrder = extractFieldOrder input
, formattingHintsSectionsFieldOrder = extractSectionsFieldOrder input
, formattingHintsAlignment = sniffAlignment input
, formattingHintsRenderSettings = sniffRenderSettings input
}
sanitize :: [String] -> [String]
sanitize = filter (not . isPrefixOf "cabal-version:") . filter (not . null) . map stripEnd
stripEnd :: String -> String
stripEnd = reverse . dropWhile isSpace . reverse
extractFieldOrder :: [String] -> [String]
extractFieldOrder = map fst . catMaybes . map splitField
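-- Pair every section (the last non-indented line before an indented block)
-- with the order of the field names found in its indented lines.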
extractSectionsFieldOrder :: [String] -> [(String, [String])]
extractSectionsFieldOrder = map (fmap extractFieldOrder) . splitSections
where
splitSections input = case break startsWithSpace input of
([], []) -> []
(xs, ys) -> case span startsWithSpace ys of
(fields, zs) -> case reverse xs of
name : _ -> (name, unindent fields) : splitSections zs
_ -> splitSections zs
startsWithSpace :: String -> Bool
startsWithSpace xs = case xs of
y : _ -> isSpace y
_ -> False
unindent :: [String] -> [String]
unindent input = map (drop indentation) input
where
indentation = minimum $ map (length . takeWhile isSpace) input
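-- Guess the column at which field values start: if every "name: value" line
-- places its value at one and the same column, report that column as the
-- alignment to reuse; otherwise report nothing.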
sniffAlignment :: [String] -> Maybe Alignment
sniffAlignment input = case nub . catMaybes . map indentation . catMaybes . map splitField $ input of
[n] -> Just (Alignment n)
_ -> Nothing
where
indentation :: (String, String) -> Maybe Int
indentation (name, value) = case span isSpace value of
(_, "") -> Nothing
(xs, _) -> (Just . succ . length $ name ++ xs)
splitField :: String -> Maybe (String, String)
splitField field = case span isNameChar field of
(xs, ':':ys) -> Just (xs, ys)
_ -> Nothing
where
isNameChar = (`elem` nameChars)
nameChars = ['a'..'z'] ++ ['A'..'Z'] ++ "-"
sniffIndentation :: [String] -> Maybe Int
sniffIndentation input = sniffFrom "library" <|> sniffFrom "executable"
where
sniffFrom :: String -> Maybe Int
sniffFrom section = case findSection . removeEmptyLines $ input of
_ : x : _ -> Just . length $ takeWhile isSpace x
_ -> Nothing
where
findSection = dropWhile (not . isPrefixOf section)
removeEmptyLines :: [String] -> [String]
removeEmptyLines = filter $ any (not . isSpace)
sniffCommaStyle :: [String] -> Maybe CommaStyle
sniffCommaStyle input
| any startsWithComma input = Just LeadingCommas
| any (startsWithComma . reverse) input = Just TrailingCommas
| otherwise = Nothing
where
startsWithComma = isPrefixOf "," . dropWhile isSpace
sniffRenderSettings :: [String] -> RenderSettings
sniffRenderSettings input = RenderSettings indentation fieldAlignment commaStyle
where
indentation = fromMaybe (renderSettingsIndentation defaultRenderSettings) (sniffIndentation input)
fieldAlignment = renderSettingsFieldAlignment defaultRenderSettings
commaStyle = fromMaybe (renderSettingsCommaStyle defaultRenderSettings) (sniffCommaStyle input)
|
haskell-tinc/hpack
|
src/Hpack/Render/Hints.hs
|
Haskell
|
mit
| 3,834
|
{-# LANGUAGE FlexibleContexts #-}
{-
Copyright (C) 2012 Kacper Bak <http://gsd.uwaterloo.ca>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-}
module Language.Clafer.Intermediate.ResolverInheritance where
import Control.Applicative
import Control.Monad
import Control.Monad.Error
import Control.Monad.State
import Data.Maybe
import Data.Graph
import Data.Tree
import Data.List
import qualified Data.Map as Map
import Language.ClaferT
import Language.Clafer.Common
import Language.Clafer.Front.Absclafer
import Language.Clafer.Intermediate.Intclafer
import Language.Clafer.Intermediate.ResolverName
-- | Resolve Non-overlapping inheritance
resolveNModule :: (IModule, GEnv) -> Resolve (IModule, GEnv)
resolveNModule (imodule, genv') =
do
let decls' = _mDecls imodule
decls'' <- mapM (resolveNElement decls') decls'
return (imodule{_mDecls = decls''}, genv' {sClafers = bfs toNodeShallow $ toClafers decls''})
resolveNClafer :: [IElement] -> IClafer -> Resolve IClafer
resolveNClafer declarations clafer =
do
super' <- resolveNSuper declarations $ _super clafer
elements' <- mapM (resolveNElement declarations) $ _elements clafer
return $ clafer {_super = super',
_elements = elements'}
resolveNSuper :: [IElement] -> ISuper -> Resolve ISuper
resolveNSuper declarations x = case x of
ISuper False [PExp _ pid' pos' (IClaferId _ id' isTop')] ->
if isPrimitive id' || id' == "clafer"
then return x
else do
r <- resolveN pos' declarations id'
id'' <- case r of
Nothing -> throwError $ SemanticErr pos' $ "No superclafer found: " ++ id'
Just m -> return $ fst m
return $ ISuper False [idToPExp pid' pos' "" id'' isTop']
_ -> return x
resolveNElement :: [IElement] -> IElement -> Resolve IElement
resolveNElement declarations x = case x of
IEClafer clafer -> IEClafer <$> resolveNClafer declarations clafer
IEConstraint _ _ -> return x
IEGoal _ _ -> return x
resolveN :: Span -> [IElement] -> String -> Resolve (Maybe (String, [IClafer]))
resolveN pos' declarations id' =
findUnique pos' id' $ map (\x -> (x, [x])) $ filter _isAbstract $ bfsClafers $
toClafers declarations
-- | Resolve overlapping inheritance
resolveOModule :: (IModule, GEnv) -> Resolve (IModule, GEnv)
resolveOModule (imodule, genv') =
do
let decls' = _mDecls imodule
decls'' <- mapM (resolveOElement (defSEnv genv' decls')) decls'
return (imodule {_mDecls = decls''}, genv' {sClafers = bfs toNodeShallow $ toClafers decls''})
resolveOClafer :: SEnv -> IClafer -> Resolve IClafer
resolveOClafer env clafer =
do
super' <- resolveOSuper env {context = Just clafer} $ _super clafer
elements' <- mapM (resolveOElement env {context = Just clafer}) $ _elements clafer
return $ clafer {_super = super', _elements = elements'}
resolveOSuper :: SEnv -> ISuper -> Resolve ISuper
resolveOSuper env x = case x of
ISuper True exps' -> do
exps'' <- mapM (resolvePExp env) exps'
let isOverlap = not (length exps'' == 1 && isPrimitive (getSuperId exps''))
return $ ISuper isOverlap exps''
_ -> return x
resolveOElement :: SEnv -> IElement -> Resolve IElement
resolveOElement env x = case x of
IEClafer clafer -> IEClafer <$> resolveOClafer env clafer
IEConstraint _ _ -> return x
IEGoal _ _ -> return x
-- | Resolve inherited and default cardinalities
analyzeModule :: (IModule, GEnv) -> IModule
analyzeModule (imodule, genv') =
imodule{_mDecls = map (analyzeElement (defSEnv genv' decls')) decls'}
where
decls' = _mDecls imodule
analyzeClafer :: SEnv -> IClafer -> IClafer
analyzeClafer env clafer =
clafer' {_elements = map (analyzeElement env {context = Just clafer'}) $
_elements clafer'}
where
clafer' = clafer {_gcard = analyzeGCard env clafer,
_card = analyzeCard env clafer}
-- only for non-overlapping
analyzeGCard :: SEnv -> IClafer -> Maybe IGCard
analyzeGCard env clafer = gcard' `mplus` (Just $ IGCard False (0, -1))
where
gcard'
| _isOverlapping $ _super clafer = _gcard clafer
| otherwise = listToMaybe $ mapMaybe _gcard $
findHierarchy getSuper (clafers env) clafer
analyzeCard :: SEnv -> IClafer -> Maybe Interval
analyzeCard env clafer = _card clafer `mplus` Just card'
where
card'
| _isAbstract clafer = (0, -1)
| (isJust $ context env) && pGcard == (0, -1)
|| (isTopLevel $ _cinPos clafer) = (1, 1)
| otherwise = (0, 1)
pGcard = _interval $ fromJust $ _gcard $ fromJust $ context env
isTopLevel (Span (Pos _ c) _) = c==1
analyzeElement :: SEnv -> IElement -> IElement
analyzeElement env x = case x of
IEClafer clafer -> IEClafer $ analyzeClafer env clafer
IEConstraint _ _ -> x
IEGoal _ _ -> x
-- | Expand inheritance
resolveEModule :: (IModule, GEnv) -> (IModule, GEnv)
resolveEModule (imodule, genv') = (imodule{_mDecls = decls''}, genv'')
where
decls' = _mDecls imodule
(decls'', genv'') = runState (mapM (resolveEElement []
(unrollableModule imodule)
False decls') decls') genv'
-- -----------------------------------------------------------------------------
unrollableModule :: IModule -> [String]
unrollableModule imodule = getDirUnrollables $
mapMaybe unrollabeDeclaration $ _mDecls imodule
unrollabeDeclaration :: IElement -> Maybe (String, [String])
unrollabeDeclaration x = case x of
IEClafer clafer -> if _isAbstract clafer
then Just (_uid clafer, unrollableClafer clafer)
else Nothing
IEConstraint _ _ -> Nothing
IEGoal _ _ -> Nothing
unrollableClafer :: IClafer -> [String]
unrollableClafer clafer
| _isOverlapping $ _super clafer = []
| getSuper clafer == "clafer" = deps
| otherwise = getSuper clafer : deps
where
deps = (toClafers $ _elements clafer) >>= unrollableClafer
getDirUnrollables :: [(String, [String])] -> [String]
getDirUnrollables dependencies = (filter isUnrollable $ map (map v2n) $
map flatten (scc graph)) >>= map fst3
where
(graph, v2n, _) = graphFromEdges $ map (\(c, ss) -> (c, c, ss)) dependencies
isUnrollable (x:[]) = fst3 x `elem` trd3 x
isUnrollable _ = True
-- -----------------------------------------------------------------------------
resolveEClafer :: MonadState GEnv m => [String] -> [String] -> Bool -> [IElement] -> IClafer -> m IClafer
resolveEClafer predecessors unrollables absAncestor declarations clafer = do
sClafers' <- gets sClafers
clafer' <- renameClafer absAncestor clafer
let predecessors' = _uid clafer' : predecessors
(sElements, super', superList) <-
resolveEInheritance predecessors' unrollables absAncestor declarations
(findHierarchy getSuper sClafers' clafer)
let sClafer = Map.fromList $ zip (map _uid superList) $ repeat [predecessors']
modify (\e -> e {stable = Map.delete "clafer" $
Map.unionWith ((nub.).(++)) sClafer $
stable e})
elements' <-
mapM (resolveEElement predecessors' unrollables absAncestor declarations)
$ _elements clafer
return $ clafer' {_super = super', _elements = elements' ++ sElements}
renameClafer :: MonadState GEnv m => Bool -> IClafer -> m IClafer
renameClafer False clafer = return clafer
renameClafer True clafer = renameClafer' clafer
renameClafer' :: MonadState GEnv m => IClafer -> m IClafer
renameClafer' clafer = do
let claferIdent = _ident clafer
identCountMap' <- gets identCountMap
let count = Map.findWithDefault 0 claferIdent identCountMap'
modify (\e -> e { identCountMap = Map.alter (\_ -> Just (count+1)) claferIdent identCountMap' } )
return $ clafer { _uid = genId claferIdent count }
genId :: String -> Int -> String
genId id' count = concat ["c", show count, "_", id']
resolveEInheritance :: MonadState GEnv m => [String] -> [String] -> Bool -> [IElement] -> [IClafer] -> m ([IElement], ISuper, [IClafer])
resolveEInheritance predecessors unrollables absAncestor declarations allSuper
| _isOverlapping $ _super clafer = return ([], _super clafer, [clafer])
| otherwise = do
let superList = (if absAncestor then id else tail) allSuper
let unrollSuper = filter (\s -> _uid s `notElem` unrollables) $ tail allSuper
elements' <-
mapM (resolveEElement predecessors unrollables True declarations) $
unrollSuper >>= _elements
let super' = if (getSuper clafer `elem` unrollables)
then _super clafer
else ISuper False [idToPExp "" noSpan "" "clafer" False]
return (elements', super', superList)
where
clafer = head allSuper
resolveEElement :: MonadState GEnv m => [String] -> [String] -> Bool -> [IElement] -> IElement -> m IElement
resolveEElement predecessors unrollables absAncestor declarations x = case x of
IEClafer clafer -> if _isAbstract clafer then return x else IEClafer `liftM`
resolveEClafer predecessors unrollables absAncestor declarations clafer
IEConstraint _ _ -> return x
IEGoal _ _ -> return x
|
juodaspaulius/clafer-old-customBNFC
|
src/Language/Clafer/Intermediate/ResolverInheritance.hs
|
Haskell
|
mit
| 10,166
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TemplateHaskell #-}
module Cilia.Config where
import Prelude
import Data.Maybe(fromMaybe)
import qualified Data.Text as T
import Data.Yaml( FromJSON(..)
, (.:)
, (.:?)
, decodeFileEither)
import qualified Data.Yaml as Y
import Lens.Micro.TH(makeLenses)
data DefaultSection =
DefaultSection { _refreshInterval :: Int
}deriving(Eq, Show)
instance FromJSON DefaultSection where
parseJSON (Y.Object o) = DefaultSection <$>
fmap (fromMaybe 10) (o .:? "refreshInterval")
parseJSON _ = error "Expected default section"
makeLenses ''DefaultSection
data TravisSection =
TravisSection { _userName :: T.Text
}deriving(Eq, Show)
instance FromJSON TravisSection where
parseJSON (Y.Object o) = TravisSection <$>
o .: "userName"
parseJSON _ = error "Expected travis section"
makeLenses ''TravisSection
data Config =
Config { _defaultSection :: DefaultSection
, _travis :: TravisSection
}deriving(Eq, Show)
instance FromJSON Config where
parseJSON (Y.Object o) = Config <$>
o .: "default" <*>
o .: "travis"
parseJSON _ = fail "Expected configuration object"
makeLenses ''Config
readConfig :: T.Text -> IO Config
readConfig configFileName =
either (error . show) id <$>
decodeFileEither (T.unpack configFileName)
|
bbiskup/cilia
|
src-lib/Cilia/Config.hs
|
Haskell
|
mit
| 1,446
|
module Y2017.M01.D02.Exercise where
import Data.List
import Data.Set (Set)
{--
Happy New Year, Haskellers.
Dr. Paul Coxon claims 2017 will be less divisive than 2016.
He is correct, but let's prove it.
1. What are the unique divisors of last year and this year?
--}
type Year = Int
uniqueFactors :: Year -> Set Int
uniqueFactors yr = undefined
{--
So:
uniqueFactors 2017 will get you {1,2017}
uniqueFactors 2016 will get you {1,4,7,8,9}
2. What are the unique prime factors of each of these years?
--}
primeFactors :: Year -> Set Int
primeFactors = undefined
{--
So:
primeFactors 2017 ~> {2017}
primeFactors 2016 ~> {2,3,7}
Question:
1. What years AD 2 - AD 2017 are prime?
--}
primeYears :: [Year] -> Set Year
primeYears yrs = undefined
-- 2. Order those years (AD 2 - AD 2017) by the number of their factors
-- 2a. Which year(s) had the most unique factors
-- 2b (or not 2b) (I love writing that) Which years had the most prime factors?
orderedYears :: (Year -> Set Int) -> [Year] -> [(Year, Set Int)]
orderedYears factoringFn years = undefined
-- Hint: you may wish to use Data.List.sortBy and Data.List.group to answer 2.
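{--
One possible direction for primeFactors (a sketch, not the official solution;
it assumes an additional qualified import of Data.Set for fromList):
repeatedly divide out the smallest divisor d starting from 2; once d*d > n,
whatever remains of n is itself prime.

primeFactors = Set.fromList . go 2
  where go _ 1 = []
        go d n | d * d > n      = [n]
               | n `mod` d == 0 = d : go d (n `div` d)
               | otherwise      = go (d + 1) n
--}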
|
geophf/1HaskellADay
|
exercises/HAD/Y2017/M01/D02/Exercise.hs
|
Haskell
|
mit
| 1,147
|
{-# LANGUAGE OverloadedStrings #-}
module Utils.Password
( Password (Password)
, PasswordHash
, verifyPassword
, createPasswordHash
, writePasswordHashToFile
, readPasswordHashFromFile
)where
import Data.String (IsString(..))
import Data.Text (Text)
import Data.Text.Encoding (encodeUtf8)
import Data.ByteString (ByteString)
import qualified Data.ByteString as B
import Crypto.Hash (Digest, SHA256(..), hash, digestToHexByteString)
newtype Password =
Password { getPwd :: Text }
instance IsString Password where
fromString = Password . fromString
newtype PasswordHash =
PasswordHash { getHash :: ByteString }
deriving Eq
verifyPassword :: PasswordHash -> Password -> Bool
verifyPassword hash pwd =
hash == createPasswordHash pwd
createPasswordHash :: Password -> PasswordHash
createPasswordHash =
PasswordHash .
digestToHexByteString .
(hash :: ByteString -> Digest SHA256) .
encodeUtf8 .
getPwd
writePasswordHashToFile :: FilePath -> Password -> IO ()
writePasswordHashToFile path =
B.writeFile path . getHash . createPasswordHash
readPasswordHashFromFile :: FilePath -> IO PasswordHash
readPasswordHashFromFile path =
PasswordHash <$> B.readFile path
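-- A usage sketch (not part of the original module):
--
-- let h = createPasswordHash (Password "secret")
-- in verifyPassword h (Password "secret")   -- True
--    && not (verifyPassword h (Password "wrong"))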
|
CarstenKoenig/MyWebSite
|
src/Utils/Password.hs
|
Haskell
|
mit
| 1,215
|
-- Smallest multiple
-- Problem 5
-- 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
-- What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
main = do
print (getSmallestN)
getSmallestN =
head [x | x <- [2520,2540..], all (isDivisibleBy x) [11..19]]
isDivisibleBy x y =
x `mod` y == 0
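-- An alternative sketch (not part of the original solution): the answer is
-- simply the least common multiple of 1..20, which avoids the search above.
smallestViaLcm :: Integer
smallestViaLcm = foldr1 lcm [1..20]
-- smallestViaLcm == 232792560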
|
BrunoRB/haskell-problems
|
euler/euler5.hs
|
Haskell
|
mit
| 404
|
module GHCJS.DOM.MediaKeyEvent (
) where
|
manyoo/ghcjs-dom
|
ghcjs-dom-webkit/src/GHCJS/DOM/MediaKeyEvent.hs
|
Haskell
|
mit
| 43
|
module Main where
import Servant.Server
import Network.Wai.Handler.Warp
import Api
--------------------------------------------------------------------------------
main :: IO ()
main = do
run 3000 (serve api server)
|
sigrlami/servant-examples
|
servant-auth-basic/src/Main.hs
|
Haskell
|
mit
| 222
|
module Parser.Expression where
import Text.Parsec
import Text.Parsec.String
import Text.Parsec.Char
import Types
import Parser.Common
-- CALL --
call :: Parser Call
call = do
pos <- getPosition
name <- identifier
openParen
args <- expr `sepBy` (char ',')
closeParen
return $ Call pos name args
-- EXPRESSION --
expr :: Parser Expr
expr = do
spaces
intLit <|> charLit <|> boolLit <|> (try callExpr) <|> var
callExpr :: Parser Expr
callExpr = do
c <- call
return $ CallExpr c
var :: Parser Expr
var = do
pos <- getPosition
name <- identifier
return $ ExprVar $ Var pos name
intLit :: Parser Expr
intLit = do
pos <- getPosition
str <- (string "0") <|> do
a <- satisfy $ \c -> c `elem` ['1' .. '9']
b <- many digit
return $ a:b
let val = (read str) :: Int
return $ IntLitExpr $ IntLit pos val
charLit :: Parser Expr
charLit = do
pos <- getPosition
char '\''
c <- newLineChar <|> anyChar
char '\''
return $ CharLitExpr $ CharLit pos c
newLineChar :: Parser Char
newLineChar = do
char '\\'
char 'n'
return '\n'
boolLit :: Parser Expr
boolLit = do
spaces
pos <- getPosition
stringValue <- (string "true") <|> (string "false")
let value = stringValue == "true"
return $ BoolLitExpr $ BoolLit pos value
|
PelleJuul/popl
|
src/Parser/Expression.hs
|
Haskell
|
mit
| 1,279
|
{-# LANGUAGE FlexibleInstances #-}
import Control.Applicative
import Control.DeepSeq
import Data.List (sort,unfoldr)
import Data.Word
import System.Process
import System.Vacuum.Cairo (viewFile)
import Test.QuickCheck
view x = rnf x `seq` do
viewFile "temp.svg" x
system "opera temp.svg"
data Heap a = E | T a (Heap a) (Heap a) deriving Show
-- We actually require that the root has an empty right child. It irks me.
--data Heap a = EmptyHeap | Heap a (HeapTree a)
instance NFData a => NFData (Heap a) where
rnf E = ()
rnf (T x d r) = rnf (x,d,r)
size E = 0
size (T x a b) = 1 + size a + size b
isEmpty E = True
isEmpty _ = False
isProperHeap :: Word -> Heap Word -> Bool
isProperHeap x (T y d r) = x <= y && isProperHeap y d && isProperHeap x r
isProperHeap x E = True
isProperRoot E = True
isProperRoot (T x hs E) = isProperHeap x hs
isProperRoot _ = False
findMin (T x _ _) = x
insert x h = merge (T x E E) h
insertList xs h = foldl (flip insert) h xs
merge h E = h
merge E h = h
merge h1@(T x hs1 E) h2@(T y hs2 E)
| x <= y = T x (T y hs2 hs1) E
| otherwise = T y (T x hs1 hs2) E
mergePairs E = E
mergePairs h@(T x _ E) = h
mergePairs (T x hs1 (T y hs2 hs)) =
merge (merge (T x hs1 E) (T y hs2 E)) (mergePairs hs)
deleteMin (T _ hs E) = mergePairs hs
delete x = go
where
go E = E
go (T y d r)
| x == y = merge d r
| otherwise = T y (go d) (go r)
heapsort xs = unfoldr f (insertList xs E)
where
f E = Nothing
f h = Just (findMin h, deleteMin h)
deepCheck p = quickCheckWith (stdArgs { maxSuccess = 10000}) p
instance (Ord a, Arbitrary a) => Arbitrary (Heap a) where
arbitrary = frequency [(1, return E), (10, insert <$> arbitrary <*> arbitrary)]
{-shrink E = [E]
shrink x@(T _ E E) = [E]
shrink (T x h1 h2) = let xs = (T x <$> shrink h1 <*> shrink h2) in xs ++ concatMap shrink xs-}
newtype NonEmptyHeap a = NonEmptyHeap a
instance (Ord a, Arbitrary a) => Arbitrary (NonEmptyHeap (Heap a)) where
arbitrary = NonEmptyHeap <$> (insert <$> arbitrary <*> arbitrary)
instance Show a => Show (NonEmptyHeap a) where
show (NonEmptyHeap x) = show x
showsPrec n (NonEmptyHeap x) = showsPrec n x
prop_merge_keeps_proper = (\x y -> isProperRoot x && isProperRoot y ==> isProperRoot (merge x y))
prop_merge_size = (\x y -> isProperRoot x && isProperRoot y ==> size (merge x y) == size x + size y)
prop_insert_keeps_proper = (\x y -> isProperRoot x ==> isProperRoot (insert y x))
prop_insert_size = (\x y -> isProperRoot x ==> size (insert y x) == size x + 1)
prop_insert_min (NonEmptyHeap x) y =
isProperRoot x ==> min oldMin newMin == min oldMin y
where
newMin = findMin (insert y x)
oldMin = findMin x
prop_insert_list_findMin (NonEmpty ys) = isProperRoot x && findMin x == minimum ys
where
x = insertList ys E
prop_deleteMin_keeps_proper = (\(NonEmptyHeap x) -> isProperRoot x ==> isProperRoot (deleteMin x))
prop_deleteMin_size = \(NonEmptyHeap x) -> isProperRoot x ==> size (deleteMin x) == size x - 1
prop_deleteMin_insert_min (NonEmptyHeap x) = isProperRoot x ==>
findMin x'' == findMin x && isProperRoot x' && isProperRoot x''
where
x' = insert (findMin x) x
x'' = deleteMin x'
prop_deleteMin_list_second (NonEmpty ys) = not (null (tail ys)) ==>
findMin x' == head ys' && findMin x'' == head (tail ys') && isProperRoot x' && isProperRoot x''
where
x' = insertList ys E
x'' = deleteMin x'
ys' = sort ys
prop_insert1_delete_proper x y = isProperRoot x ==> isProperRoot (delete y (insert y x))
prop_insert_delete_proper x (NonEmpty ys) = isProperRoot x ==> isProperRoot (deleteAll ys (insertAll ys x))
deleteAll [] x = x
deleteAll (y:ys) x = deleteAll ys (delete y x)
insertAll [] x = x
insertAll (y:ys) x = insertAll ys (insert y x)
prop_heapsort xs = heapsort (xs :: [Int]) == sort xs
main = do
deepCheck prop_insert1_delete_proper
deepCheck prop_insert_delete_proper
deepCheck prop_merge_keeps_proper
deepCheck prop_merge_size
deepCheck prop_insert_keeps_proper
deepCheck prop_insert_size
deepCheck prop_insert_min
deepCheck prop_insert_list_findMin
deepCheck prop_deleteMin_keeps_proper
deepCheck prop_deleteMin_size
deepCheck prop_deleteMin_insert_min
deepCheck prop_deleteMin_list_second
deepCheck prop_heapsort
|
olsner/sbmalloc
|
PairingHeap.hs
|
Haskell
|
mit
| 4,311
|
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE FlexibleContexts #-}
module Template (
render
) where
import Data.Data
import Data.FileEmbed (embedDir)
import Data.ByteString (ByteString)
import Data.ByteString.UTF8 (toString)
import Text.Hastache.Context (mkGenericContext)
import Text.Hastache (
hastacheStr,
defaultConfig,
encodeStr,
decodeStrLT)
files :: [(FilePath, ByteString)]
files = $(embedDir "templates")
content :: FilePath -> String
content path = toString . snd . head $ filter (\(p, _) -> p == path) files
render :: Data a => FilePath -> a -> IO String
render templatePath values = do
res <- hastacheStr
defaultConfig
(encodeStr $ content templatePath)
(mkGenericContext values)
return $ decodeStrLT res
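-- A sketch of how 'render' might be used (the template name and record below
-- are hypothetical, not taken from this repository):
--
-- data Greeting = Greeting { name :: String } deriving (Data, Typeable)
--
-- main = render "greeting.mustache" (Greeting "world") >>= putStrLn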
|
prasmussen/magmod
|
Template.hs
|
Haskell
|
mit
| 774
|
---------------------------------------------------------------------
--
-- | Ascetic
--
-- @Text\/Ascetic.hs@
--
-- Data structure, combinators, and functions for assembling
-- data and emitting files in any XML-like or HTML-like
-- markup language (consisting of tags, elements, attributes,
-- declarations, and ASCII text content). Trade-offs are made
-- in favor of simplicity and concision of constructors and
-- combinators.
---------------------------------------------------------------------
--
module Text.Ascetic
where
import Data.String.Utils (join)
---------------------------------------------------------------------
-- | Data type for simple markup trees and class for data types
-- that can be converted into it.
type Content = String
type Tag = String
type Attribute = String
type Value = String
data Ascetic =
C Content -- Content.
| E Tag [Ascetic] -- Element.
| A Tag [(Attribute, Value)] [Ascetic] -- Element with attributes.
| L [Ascetic] -- Undelimited list.
| D Tag [(Attribute, Value)] Ascetic -- Declaration.
deriving (Eq)
---------------------------------------------------------------------
-- | Type class for data structures that can be converted into the
-- Ascetic representation.
class ToAscetic a where
ascetic :: a -> Ascetic
---------------------------------------------------------------------
-- | Conversion to ASCII string (with indentation for legibility).
ascii x = to "" x where
showAVs avs = [a ++ "=\"" ++ v ++ "\"" | (a,v) <- avs]
to ind x = case x of
C c -> c
E t [] -> ind ++ "<" ++ t ++ ">" ++ "</" ++ t ++ ">"
E t [C c] -> ind ++ "<" ++ t ++ ">" ++ c ++ "</" ++ t ++ ">"
E t xs ->
ind
++ "<" ++ t ++ ">\n"
++ join "\n" [to (ind ++ " ") x | x <- xs]
++ "\n" ++ ind ++ "</" ++ t ++ ">"
A t avs [] -> ind ++ "<" ++ t ++ " " ++ join " " (showAVs avs) ++ ">" ++ "</" ++ t ++ ">"
A t avs [C c] -> ind ++ "<" ++ t ++ " " ++ join " " (showAVs avs) ++ ">" ++ c ++ "</" ++ t ++ ">"
A t avs xs ->
ind
++ "<" ++ t ++ " " ++ join " " (showAVs avs) ++ ">\n"
++ join "\n" [to (ind ++ " ") x | x <- xs]
++ "\n" ++ ind ++ "</" ++ t ++ ">"
L xs -> join "\n" [to ind x | x <- xs]
D t avs x ->
ind
++ "<?" ++ t ++ " " ++ join " " (showAVs avs) ++ "?>\n"
++ (to ind x)
---------------------------------------------------------------------
-- | Conversion to an ASCII string that has no extra indentation or
-- newlines for legibility.
minified x = to x where
showAVs avs = [a ++ "=\"" ++ v ++ "\"" | (a,v) <- avs]
to x = case x of
C c -> c
E t [] -> "<" ++ t ++ ">" ++ "</" ++ t ++ ">"
E t [C c] -> "<" ++ t ++ ">" ++ c ++ "</" ++ t ++ ">"
E t xs ->
"<" ++ t ++ ">"
++ join "" [to x | x <- xs]
++ "" ++ "</" ++ t ++ ">"
A t avs [] -> "<" ++ t ++ " " ++ join " " (showAVs avs) ++ ">" ++ "</" ++ t ++ ">"
A t avs [C c] -> "<" ++ t ++ " " ++ join " " (showAVs avs) ++ ">" ++ c ++ "</" ++ t ++ ">"
A t avs xs ->
"<" ++ t ++ " " ++ join " " (showAVs avs) ++ ">"
++ join "" [to x | x <- xs]
++ "" ++ "</" ++ t ++ ">"
L xs -> join "" [to x | x <- xs]
D t avs x ->
"<?" ++ t ++ " " ++ join " " (showAVs avs) ++ "?>\n"
++ (to x)
---------------------------------------------------------------------
-- | Default rendering uses 'minified' to preserve HTML whitespace fidelity.
instance Show Ascetic where
show = minified
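---------------------------------------------------------------------
-- | A small usage sketch (not part of the original module): build a tiny
-- document and render it without indentation.
exampleDoc :: Ascetic
exampleDoc = A "html" [("lang", "en")] [E "body" [C "hello"]]
-- minified exampleDoc == "<html lang=\"en\"><body>hello</body></html>"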
--eof
|
lapets/ascetic
|
Text/Ascetic.hs
|
Haskell
|
mit
| 3,749
|
-- Pretty.hs ---
--
-- Filename: Pretty.hs
-- Description:
-- Author: Manuel Schneckenreither
-- Maintainer:
-- Created: Thu Sep 4 10:42:24 2014 (+0200)
-- Version:
-- Package-Requires: ()
-- Last-Updated: Mon Jul 23 10:23:54 2018 (+0200)
-- By: Manuel Schneckenreither
-- Update #: 62
-- URL:
-- Doc URL:
-- Keywords:
-- Compatibility:
--
--
-- Commentary:
--
--
--
--
-- Change Log:
--
--
--
--
--
-- Code:
module Data.Rewriting.ARA.Exception.Pretty
( prettyProgException
) where
import Data.Rewriting.ARA.Constants
import Data.Rewriting.ARA.Exception.Type
import Prelude hiding ((<>))
import Text.PrettyPrint
prettyProgException :: ProgException -> Doc
prettyProgException ex = text (prefix ex) <> text (getElem ex)
where
prefix ShowTextOnly {} = ""
prefix SemanticException{} = exceptionPrefixSemantic ++ " "
prefix WarningException{} = exceptionPrefixWarning ++ " "
prefix FatalException{} = exceptionPrefixFatal ++ " "
prefix ParseException{} = exceptionPrefixParse ++ " "
prefix UnsolveableException{} = exceptionPrefixUnsolveable ++ " "
getElem (ShowTextOnly x) = x
getElem (SemanticException x) = x
getElem (WarningException x) = x
getElem (FatalException x) = x
getElem (ParseException x) = x
getElem (UnsolveableException x) = x
--
-- Pretty.hs ends here
|
ComputationWithBoundedResources/ara-inference
|
src/Data/Rewriting/ARA/Exception/Pretty.hs
|
Haskell
|
mit
| 1,503
|
--------------------------------------------------------------------------------
-- |
-- Module : AI.Clustering.KMeans.Types
-- Copyright : (c) 2015 Kai Zhang
-- License : MIT
--
-- Maintainer : kai@kzhang.org
-- Stability : experimental
-- Portability : portable
--
-- <module description starting at first column>
--------------------------------------------------------------------------------
module AI.Clustering.KMeans.Types
( KMeansOpts(..)
, defaultKMeansOpts
, KMeans(..)
, Method(..)
) where
import qualified Data.Matrix.Unboxed as MU
import qualified Data.Vector.Unboxed as U
import Data.Word (Word32)
data KMeansOpts = KMeansOpts
{ kmeansMethod :: Method
, kmeansSeed :: (U.Vector Word32) -- ^ Seed for random number generation
, kmeansClusters :: Bool -- ^ Whether to return clusters; may use a lot of memory
, kmeansMaxIter :: Int -- ^ Maximum iteration
}
-- | Default options.
-- > defaultKMeansOpts = KMeansOpts
-- > { kmeansMethod = KMeansPP
-- > , kmeansSeed = U.fromList [1,2,3,4,5,6,7]
-- > , kmeansClusters = True
-- > , kmeansMaxIter = 10
-- > }
defaultKMeansOpts :: KMeansOpts
defaultKMeansOpts = KMeansOpts
{ kmeansMethod = KMeansPP
, kmeansSeed = U.fromList [2341,2342,3934,425,2345,80006,2343,234491,124,729]
, kmeansClusters = True
, kmeansMaxIter = 10000
}
-- | Results from running kmeans
data KMeans a = KMeans
{ membership :: U.Vector Int -- ^ A vector of integers (0 ~ k-1)
-- indicating the cluster to which each
-- point is allocated.
, centers :: MU.Matrix Double -- ^ A matrix of cluster centers.
, clusters :: Maybe [[a]]
, sse :: Double -- ^ the sum of squared error (SSE)
} deriving (Show)
-- | Different initialization methods
data Method = Forgy -- ^ The Forgy method randomly chooses k unique
-- observations from the data set and uses these
-- as the initial means.
| KMeansPP -- ^ K-means++ algorithm.
| Centers (MU.Matrix Double) -- ^ Provide a set of k centroids
|
kaizhang/clustering
|
src/AI/Clustering/KMeans/Types.hs
|
Haskell
|
mit
| 2,205
|
-- Copyright (C) 2009-2012 John Millikin <john@john-millikin.com>
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- | Basic types, useful to every D-Bus application.
--
-- Authors of client applications should import "DBus.Client", which provides
-- an easy RPC-oriented interface to D-Bus methods and signals.
module DBus
(
-- * Messages
Message
-- ** Method calls
, MethodCall
, methodCall
, methodCallPath
, methodCallInterface
, methodCallMember
, methodCallSender
, methodCallDestination
, methodCallAutoStart
, methodCallReplyExpected
, methodCallBody
-- ** Method returns
, MethodReturn
, methodReturn
, methodReturnSerial
, methodReturnSender
, methodReturnDestination
, methodReturnBody
-- ** Method errors
, MethodError
, methodError
, methodErrorName
, methodErrorSerial
, methodErrorSender
, methodErrorDestination
, methodErrorBody
, methodErrorMessage
-- ** Signals
, Signal
, signal
, signalPath
, signalMember
, signalInterface
, signalSender
, signalDestination
, signalBody
-- ** Received messages
, ReceivedMessage(ReceivedMethodCall, ReceivedMethodReturn, ReceivedMethodError, ReceivedSignal)
, receivedMessageSerial
, receivedMessageSender
, receivedMessageBody
-- * Variants
, Variant
, IsVariant(..)
, variantType
, IsAtom
, IsValue
, typeOf
, typeOf'
-- * Signatures
, Signature
, Type(..)
, signature
, signature_
, signatureTypes
, formatSignature
, parseSignature
-- * Object paths
, ObjectPath
, objectPath_
, formatObjectPath
, parseObjectPath
-- * Names
-- ** Interface names
, InterfaceName
, interfaceName_
, formatInterfaceName
, parseInterfaceName
-- ** Member names
, MemberName
, memberName_
, formatMemberName
, parseMemberName
-- ** Error names
, ErrorName
, errorName_
, formatErrorName
, parseErrorName
-- ** Bus names
, BusName
, busName_
, formatBusName
, parseBusName
-- * Non-native containers
-- ** Structures
, Structure
, structureItems
-- ** Arrays
, Array
, arrayItems
-- ** Dictionaries
, Dictionary
, dictionaryItems
-- * Addresses
, Address
, addressMethod
, addressParameters
, address
, formatAddress
, formatAddresses
, parseAddress
, parseAddresses
, getSystemAddress
, getSessionAddress
, getStarterAddress
-- * Message marshaling
, Endianness (..)
-- ** Marshal
, marshal
, MarshalError
, marshalErrorMessage
-- ** Unmarshal
, unmarshal
, UnmarshalError
, unmarshalErrorMessage
-- ** Message serials
, Serial
, serialValue
, firstSerial
, nextSerial
-- * D-Bus UUIDs
, UUID
, formatUUID
, randomUUID
) where
import Control.Monad (replicateM)
import qualified Data.ByteString.Char8 as Char8
import Data.Proxy (Proxy(..))
import Data.Word (Word16)
import System.Random (randomRIO)
import Text.Printf (printf)
import DBus.Internal.Address
import DBus.Internal.Message
import qualified DBus.Internal.Types
import DBus.Internal.Types hiding (typeOf)
import DBus.Internal.Wire
-- | Deprecated. Get the D-Bus type corresponding to the given Haskell value. The value
-- may be @undefined@.
typeOf :: IsValue a => a -> Type
typeOf = DBus.Internal.Types.typeOf
-- | Get the D-Bus type corresponding to the given Haskell type 'a'.
typeOf' :: IsValue a => Proxy a -> Type
typeOf' = DBus.Internal.Types.typeOf_
-- | Construct a new 'MethodCall' for the given object, interface, and method.
--
-- Use fields such as 'methodCallDestination' and 'methodCallBody' to populate
-- a 'MethodCall'.
--
-- @
--{-\# LANGUAGE OverloadedStrings \#-}
--
--methodCall \"/\" \"org.example.Math\" \"Add\"
-- { 'methodCallDestination' = Just \"org.example.Calculator\"
-- , 'methodCallBody' = ['toVariant' (1 :: Int32), 'toVariant' (2 :: Int32)]
-- }
-- @
methodCall :: ObjectPath -> InterfaceName -> MemberName -> MethodCall
methodCall path iface member = MethodCall path (Just iface) member Nothing Nothing True True []
-- | Construct a new 'MethodReturn', in reply to a method call with the given
-- serial.
--
-- Use fields such as 'methodReturnBody' to populate a 'MethodReturn'.
methodReturn :: Serial -> MethodReturn
methodReturn s = MethodReturn s Nothing Nothing []
-- | Construct a new 'MethodError', in reply to a method call with the given
-- serial.
--
-- Use fields such as 'methodErrorBody' to populate a 'MethodError'.
methodError :: Serial -> ErrorName -> MethodError
methodError s name = MethodError name s Nothing Nothing []
-- | Construct a new 'Signal' for the given object, interface, and signal name.
--
-- Use fields such as 'signalBody' to populate a 'Signal'.
signal :: ObjectPath -> InterfaceName -> MemberName -> Signal
signal path iface member = Signal path iface member Nothing Nothing []
-- | No matter what sort of message was received, get its serial.
receivedMessageSerial :: ReceivedMessage -> Serial
receivedMessageSerial (ReceivedMethodCall s _) = s
receivedMessageSerial (ReceivedMethodReturn s _) = s
receivedMessageSerial (ReceivedMethodError s _) = s
receivedMessageSerial (ReceivedSignal s _) = s
receivedMessageSerial (ReceivedUnknown s _) = s
-- | No matter what sort of message was received, get its sender (if provided).
receivedMessageSender :: ReceivedMessage -> Maybe BusName
receivedMessageSender (ReceivedMethodCall _ msg) = methodCallSender msg
receivedMessageSender (ReceivedMethodReturn _ msg) = methodReturnSender msg
receivedMessageSender (ReceivedMethodError _ msg) = methodErrorSender msg
receivedMessageSender (ReceivedSignal _ msg) = signalSender msg
receivedMessageSender (ReceivedUnknown _ msg) = unknownMessageSender msg
-- | No matter what sort of message was received, get its body (if provided).
receivedMessageBody :: ReceivedMessage -> [Variant]
receivedMessageBody (ReceivedMethodCall _ msg) = methodCallBody msg
receivedMessageBody (ReceivedMethodReturn _ msg) = methodReturnBody msg
receivedMessageBody (ReceivedMethodError _ msg) = methodErrorBody msg
receivedMessageBody (ReceivedSignal _ msg) = signalBody msg
receivedMessageBody (ReceivedUnknown _ msg) = unknownMessageBody msg
-- | Convert a 'Message' into a 'Char8.ByteString'. Although unusual, it is
-- possible for marshaling to fail; if this occurs, an error will be
-- returned instead.
marshal :: Message msg => Endianness -> Serial -> msg -> Either MarshalError Char8.ByteString
marshal = marshalMessage
-- | Parse a 'Char8.ByteString' into a 'ReceivedMessage'. The result can be
-- inspected to see what type of message was parsed. Unknown message types
-- can still be parsed successfully, as long as they otherwise conform to
-- the D-Bus standard.
unmarshal :: Char8.ByteString -> Either UnmarshalError ReceivedMessage
unmarshal = unmarshalMessage
-- | A D-Bus UUID is 128 bits of data, usually randomly generated. They are
-- used for identifying unique server instances to clients.
--
-- Older versions of the D-Bus spec also called these values /GUIDs/.
--
-- D-Bus UUIDs are not the same as the RFC-standardized UUIDs or GUIDs.
newtype UUID = UUID Char8.ByteString
deriving (Eq, Ord, Show)
-- | Format a D-Bus UUID as hex-encoded ASCII.
formatUUID :: UUID -> String
formatUUID (UUID bytes) = Char8.unpack bytes
-- | Generate a random D-Bus UUID. This value is suitable for use in a
-- randomly-allocated address, or as a listener's socket address
-- @\"guid\"@ parameter.
randomUUID :: IO UUID
randomUUID = do
-- The version of System.Random bundled with ghc < 7.2 doesn't define
-- instances for any of the fixed-length word types, so we imitate
-- them using the instance for Int.
--
-- 128 bits is 8 16-bit integers. We use chunks of 16 instead of 32
-- because Int is not guaranteed to be able to store a Word32.
let hexInt16 i = printf "%04x" (i :: Int)
int16s <- replicateM 8 (randomRIO (0, fromIntegral (maxBound :: Word16)))
return (UUID (Char8.pack (concatMap hexInt16 int16s)))
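-- A usage sketch (not part of the original module):
--
-- main = do
--   uuid <- randomUUID
--   putStrLn (formatUUID uuid)  -- prints 32 hex characters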
|
rblaze/haskell-dbus
|
lib/DBus.hs
|
Haskell
|
apache-2.0
| 8,863
|
{-# LANGUAGE OverloadedStrings #-}
-- |
-- Module : Geography.VectorTile.Util
-- Copyright : (c) Colin Woodbury 2016 - 2018
-- License : BSD3
-- Maintainer: Colin Woodbury <colingw@gmail.com>
module Data.Geometry.VectorTile.Util where
import Data.Geometry.VectorTile.Geometry (Point (..))
import qualified Data.Sequence as Seq
import qualified Data.Text as Text
---
-- | A strict pair of Ints.
data Pair = Pair !Int !Int
-- | A sort of "self-zip", forming pairs from every two elements in a list.
-- Fails if there is an uneven number of elements.
safePairsWith :: (a -> Int) -> Seq.Seq a -> Either Text.Text (Seq.Seq Point)
safePairsWith f list = if null err then Right pts else Left "Uneven number of parameters given."
where
(pts, err) = go list
go Seq.Empty = (Seq.empty, Seq.empty)
go (a Seq.:<| Seq.Empty) = (Seq.empty, Seq.singleton a)
go (a Seq.:<| b Seq.:<| rest) = (Point (f a) (f b) Seq.<| (fst . go $ rest), snd . go $ rest)
-- | Flatten a list of pairs. Equivalent to:
--
-- > ps ^.. each . both
unpairs :: [(a,a)] -> [a]
unpairs = foldr (\(a,b) acc -> a : b : acc) []
{-# INLINE unpairs #-}
|
sitewisely/zellige
|
src/Data/Geometry/VectorTile/Util.hs
|
Haskell
|
apache-2.0
| 1,189
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE FlexibleContexts #-}
module FormatHandler.Html
( htmlFormatHandler
, YesodAloha (..)
, splitTitle
, titleForm
) where
import FormatHandler
import Text.Julius (julius)
import Text.HTML.SanitizeXSS (sanitizeBalance)
import qualified Data.Text as T
import qualified Data.Text.Lazy as TL
import Yesod.Core
import Yesod.Form
import Yesod.Form.Jquery
import Text.Lucius (lucius)
import Control.Monad.Trans.Class (lift)
import Control.Monad.IO.Class (liftIO)
import Text.Hamlet (shamlet)
import Data.Maybe (listToMaybe, mapMaybe)
import Text.Blaze (preEscapedText)
import qualified Data.Set as Set
import qualified Data.Text.Lazy.Encoding as TLE
import Data.Text.Encoding.Error (lenientDecode)
import qualified Data.ByteString.Lazy as L
import Data.Enumerator (enumList)
import Text.HTML.TagSoup
import Control.Arrow ((***))
import Control.Applicative ((<$>), (<*>))
import Text.Blaze (Html)
splitTitle :: T.Text -> (Maybe T.Text, T.Text)
splitTitle t =
case T.stripPrefix "Title: " t of
Just rest ->
let (title, rest') = T.break (== '\n') rest
in (Just title, T.drop 1 rest')
Nothing -> (Nothing, t)
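-- For example (a sketch, not part of the original module):
-- splitTitle "Title: Home\n<p>hi</p>" == (Just "Home", "<p>hi</p>")
-- splitTitle "<p>hi</p>"              == (Nothing, "<p>hi</p>")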
joinTitle :: (a -> T.Text) -> Maybe T.Text -> a -> T.Text
joinTitle unwrap Nothing t = unwrap t
joinTitle unwrap (Just a) t = T.concat ["Title: ", a, "\n", unwrap t]
titleForm :: RenderMessage master FormMessage
=> Field sub master a
-> (T.Text -> a)
-> (a -> T.Text)
-> GWidget sub master ()
-> Maybe T.Text
-> Html
-> Form sub master (FormResult T.Text, GWidget sub master ())
titleForm field wrap unwrap extraWidget mt =
(fmap . fmap) (\(a, b) -> (a, b >> extraWidget))
$ renderTable $ joinTitle unwrap
<$> aopt textField "Title" (mtitle :: Maybe (Maybe T.Text))
<*> areq field "Content" (fmap wrap content)
where
(mtitle, content) = maybe (Nothing, Nothing) ((Just *** Just) . splitTitle) mt
htmlFormatHandler :: (YesodAloha master, YesodJquery master) => FormatHandler master
htmlFormatHandler = FormatHandler
{ fhExts = Set.singleton "html"
, fhName = "HTML"
, fhForm = titleForm alohaHtmlField id id (return ())
, fhWidget = widget
, fhFilter = Just . enumList 8 . L.toChunks . TLE.encodeUtf8 . TL.fromStrict . sanitizeBalance . TL.toStrict . TLE.decodeUtf8With lenientDecode
, fhRefersTo = const $ const $ return []
, fhTitle = \sm uri -> fmap (fst . splitTitle) $ liftIO $ uriToText sm uri
, fhFlatWidget = widget
, fhToText = \sm uri -> fmap (Just . plain) $ liftIO $ uriToText sm uri
, fhExtraParents = \_ _ -> return []
}
where
widget sm uri = do
t <- fmap (snd . splitTitle) $ liftIO $ uriToText sm uri
toWidget $ preEscapedText t
plain = T.concat . mapMaybe plain' . parseTags
plain' (TagText t) = Just t
plain' _ = Nothing
class YesodAloha a where
urlAloha :: a -> Either (Route a) T.Text
urlAlohaPlugins :: a -> [Either (Route a) T.Text]
alohaHtmlField :: (YesodAloha master, YesodJquery master) => Field sub master T.Text
alohaHtmlField = Field
{ fieldParse = return . Right . fmap sanitizeBalance . listToMaybe
, fieldView = \theId name val _isReq -> do
y <- lift getYesod
addScriptEither $ urlJqueryJs y
addScriptEither $ urlAloha y
mapM_ addScriptEither $ urlAlohaPlugins y
toWidget [shamlet|
<div ##{theId}-container>
<textarea ##{theId} name=#{name}>#{showVal val}
|]
toWidget [julius|$(function(){$("##{theId}").aloha();})|]
toWidget [lucius|##{theId}-container { width: 800px; height: 400px; overflow: auto }|]
}
where
showVal = either id id
|
snoyberg/yesodcms
|
FormatHandler/Html.hs
|
Haskell
|
bsd-2-clause
| 3,767
|
module Propellor.Property.Network where
import Propellor
import Propellor.Property.File
type Interface = String
ifUp :: Interface -> Property NoInfo
ifUp iface = cmdProperty "ifup" [iface]
-- | Resets /etc/network/interfaces to a clean and empty state,
-- containing just the standard loopback interface, and with
-- interfacesD enabled.
--
-- This can be used as a starting point to defining other interfaces.
--
-- No interfaces are brought up or down by this property.
cleanInterfacesFile :: Property NoInfo
cleanInterfacesFile = hasContent interfacesFile
[ "# Deployed by propellor, do not edit."
, ""
, "source-directory interfaces.d"
, ""
, "# The loopback network interface"
, "auto lo"
, "iface lo inet loopback"
]
`describe` ("clean " ++ interfacesFile)
-- | Configures an interface to get its address via dhcp.
dhcp :: Interface -> Property NoInfo
dhcp iface = hasContent (interfaceDFile iface)
[ "auto " ++ iface
, "iface " ++ iface ++ " inet dhcp"
]
`describe` ("dhcp " ++ iface)
`requires` interfacesDEnabled
-- | Writes a static interface file for the specified interface.
--
-- The interface has to be up already. It could have been brought up by
-- DHCP, or by other means. The current ipv4 addresses
-- and routing configuration of the interface are written into the file.
--
-- If the interface file already exists, this property does nothing,
-- no matter its content.
--
-- (ipv6 addresses are not included because it's assumed they come up
-- automatically in most situations.)
static :: Interface -> Property NoInfo
static iface = check (not <$> doesFileExist f) setup
`describe` desc
`requires` interfacesDEnabled
where
f = interfaceDFile iface
desc = "static " ++ iface
setup = property desc $ do
ls <- liftIO $ lines <$> readProcess "ip"
["-o", "addr", "show", iface, "scope", "global"]
stanzas <- liftIO $ concat <$> mapM mkstanza ls
ensureProperty $ hasContent f $ ("auto " ++ iface) : stanzas
mkstanza ipline = case words ipline of
-- Note that the IP address is written CIDR style, so
-- the netmask does not need to be specified separately.
(_:iface':"inet":addr:_) | iface' == iface -> do
gw <- getgateway
return $ catMaybes
[ Just $ "iface " ++ iface ++ " inet static"
, Just $ "\taddress " ++ addr
, ("\tgateway " ++) <$> gw
]
_ -> return []
getgateway = do
rs <- lines <$> readProcess "ip"
["route", "show", "scope", "global", "dev", iface]
return $ case words <$> headMaybe rs of
Just ("default":"via":gw:_) -> Just gw
_ -> Nothing
-- | 6to4 ipv6 connection, should work anywhere
ipv6to4 :: Property NoInfo
ipv6to4 = hasContent (interfaceDFile "sit0")
[ "# Deployed by propellor, do not edit."
, "iface sit0 inet6 static"
, "\taddress 2002:5044:5531::1"
, "\tnetmask 64"
, "\tgateway ::192.88.99.1"
, "auto sit0"
]
`describe` "ipv6to4"
`requires` interfacesDEnabled
`onChange` ifUp "sit0"
interfacesFile :: FilePath
interfacesFile = "/etc/network/interfaces"
-- | A file in the interfaces.d directory.
interfaceDFile :: Interface -> FilePath
interfaceDFile iface = "/etc/network/interfaces.d" </> iface
-- | Ensures that files in the interfaces.d directory are used.
interfacesDEnabled :: Property NoInfo
interfacesDEnabled = containsLine interfacesFile "source-directory interfaces.d"
`describe` "interfaces.d directory enabled"
|
shosti/propellor
|
src/Propellor/Property/Network.hs
|
Haskell
|
bsd-2-clause
| 3,366
|
module WebToInk.Converter.ConverterService ( prepareKindleGeneration
, getTitle
, getMobi
) where
import System.Directory (createDirectoryIfMissing, getDirectoryContents)
import System.IO (writeFile)
import System.IO.Temp (createTempDirectory)
import System.Cmd (rawSystem)
import System.Exit (ExitCode (..))
import System.Posix.Files (setFileMode, unionFileModes, ownerModes, otherExecuteMode)
import System.FilePath(combine, takeExtension, (<.>))
import Data.Char (isAscii)
import Data.List (isPrefixOf, nub)
import Data.Functor ((<$>))
import Control.Applicative((<*>))
import Control.Exception (throwIO, try, Exception)
import qualified Data.ByteString.Char8 as C
import WebToInk.Converter.HtmlPages
import WebToInk.Converter.Images (getImages)
import WebToInk.Converter.Download (downloadPage, savePage, downloadAndSaveImages, getSrcFilePath)
import WebToInk.Converter.OpfGeneration (generateOpf)
import WebToInk.Converter.TocGeneration (generateToc)
import WebToInk.Converter.Types
import WebToInk.Converter.Constants
import WebToInk.Converter.Exceptions
import WebToInk.Converter.Utils
import WebToInk.Converter.Logger
-- | Tries to download page at given url and resolve title.
-- If anything goes wrong an empty string is returned.
getTitle :: Url -> IO (Either String String)
getTitle url = do
logd $ "Getting title for: " ++ url
result <- try go :: (Exception a) => IO (Either a String)
case result of
Right title -> return $ Right title
Left exception -> handleException exception
where
go = do
maybeToc <- downloadPage url
logt "Downloaded page, resolving title"
return $ case maybeToc of
Just toc -> resolveTitle Nothing toc
Nothing -> ""
-- | Resolves page at url and all direct children.
-- Downloads all the pages and their images.
-- Then generates a .mobi file from it using the kindlegen tool
-- Finally it returns the path to the generated mobi file from which it can be downloaded.
getMobi :: Url -> String -> String -> FilePath -> IO (Either String FilePath)
getMobi url title author targetFolder = do
logd $ "Preparing " ++ title ++ " by " ++ author
result <- try go :: (Exception a) => IO (Either a FilePath)
case result of
Right fullFilePath -> return $ Right fullFilePath
Left exception -> handleException exception
where
go = do
path <- prepareKindleGeneration (Just title) (Just author) "en-us" url targetFolder
-- Allow all users to enter path and read from it since we want to make this available
-- TODO: handle the case where current user is not permitted to change permissions
-- setFileMode path $ unionFileModes ownerModes otherExecuteMode
let targetFile = filter isAscii title <.> "mobi"
runKindlegen targetFile path True
runKindlegen targetFile path firstTime = do
result <- rawSystem "kindlegen" [ "-o", targetFile, combine path "book.opf" ]
case result of
ExitSuccess -> return (combine path targetFile)
-- In case of warnings (1) we are ok
ExitFailure 1 -> return (combine path targetFile)
-- In case of problems related to javascript (2) remove it from all pages and try again
ExitFailure 2 -> if firstTime
then removeJavaScriptsAndTryAgain targetFile path
else throwIO $ KindlegenException 2
-- All others are problematic and need to be raised
ExitFailure code -> throwIO $ KindlegenException code
removeJavaScriptsAndTryAgain targetFile path = do
htmlFiles <- fmap getHtmlFilePaths . getDirectoryContents $ pagesFullPath
mapM_ removeScriptsFromFileAndSave htmlFiles
runKindlegen targetFile path False
where
removeScriptsFromFileAndSave fullPath = removeScriptsFromFile fullPath >>= saveContentsToFile fullPath
removeScriptsFromFile = fmap (removeScripts . C.unpack) . C.readFile
saveContentsToFile fullPath = C.writeFile fullPath . C.pack
getHtmlFilePaths = map (combine pagesFullPath) . filter isHtmlFile
pagesFullPath = combine path pagesFolder
isHtmlFile file = let extension = takeExtension file
in extension == ".html" || extension == ".htm"
main = testConverter
testLogger = do
initLogger "debug" (Just "./debug.log")
logi "hello world"
logd "hello world"
logt "hello world"
loge "hello world"
logw "hello world"
testConverter = do
initLogger "debug" (Just "./debug.log")
result <- getMobi url title author targetFolder
case result of
Right filePath -> logi $ "Success: " ++ filePath
Left error -> loge $ "Error: " ++ error
return ()
where
url = "http://static.springsource.org/spring/docs/current/spring-framework-reference/html/overview.html"
title = "Spring"
author = "Team"
targetFolder = "../books"
prepareKindleGeneration :: Maybe String -> Maybe String -> String -> Url -> FilePath -> IO FilePath
prepareKindleGeneration maybeTitle maybeAuthor language tocUrl folder = do
logd $ "Getting pages from: " ++ tocUrl
maybeGetHtmlPagesResult <- getHtmlPages tocUrl
case maybeGetHtmlPagesResult of
Just result -> logd ("Got pages, creating webtoink temp directory at: " ++ folder)
>> createTempDirectory folder "webtoink" >>= prepare result
Nothing -> loge "Could not download the table of contents; no HTML pages were processed"
>> throwIO TableOfContentsCouldNotBeDownloadedException
where
prepare (GetHtmlPagesResult tocContent pagesDic) targetFolder = do
let author = resolveAuthor maybeAuthor tocContent
let title = resolveTitle maybeTitle tocContent
let topPagesDic = filter (isTopLink . fst) pagesDic
let topPages = map fst topPagesDic
logd $ "Preparing for kindlegen " ++ "(Author: " ++ show author ++ ", Title: " ++ show title ++ ")"
logt $ prettifyList ", " topPagesDic
createKindleStructure title author topPagesDic topPages targetFolder
where
correctFolder targetFolder (filePath, url) = (combine targetFolder filePath, url)
createKindleStructure title author topPagesDic topPages targetFolder = do
logd $ "Created temp folder " ++ show targetFolder
logd "Starting to download pages"
result <- downloadPages tocUrl topPagesDic targetFolder
let failedFileNames = map piFileName $ failedPages result
let goodTopPages = filter (`notElem` failedFileNames) topPages
logt $ "Successfully downloaded: " ++ (prettifyList ", " goodTopPages)
logt $ "Failed to download: " ++ (prettifyList ", " failedFileNames)
logd "Generating book.opf"
let opfString = generateOpf goodTopPages (allImageUrls result) title language author
writeFile (combine targetFolder "book.opf") opfString
logd "Generating toc.ncx"
let tocString = generateToc goodTopPages title language author
writeFile (combine targetFolder "toc.ncx") tocString
return targetFolder
downloadPages :: Url -> [(FilePath, Url)] -> FilePath -> IO DownloadPagesResult
downloadPages tocUrl topPagesDic targetFolder = do
let rootUrl = getRootUrl tocUrl
downloadResults <- mapM (\(fileName, pageUrl) ->
tryProcessPage (PageInfo rootUrl pageUrl fileName) targetFolder) topPagesDic
let uniqueImageUrls =
map (getSrcFilePath "") . nub . concatMap allImageUrls $ downloadResults
let allFailedPages = concatMap failedPages downloadResults
return $ DownloadPagesResult uniqueImageUrls allFailedPages
tryProcessPage :: PageInfo -> FilePath -> IO DownloadPagesResult
tryProcessPage pi targetFolder = do
maybePageContents <- downloadPage (piPageUrl pi)
case maybePageContents of
Just pageContents -> do
imageUrls <- processPage pi pageContents targetFolder
return $ DownloadPagesResult imageUrls []
Nothing -> return $ DownloadPagesResult [] [pi]
processPage :: PageInfo -> PageContents -> FilePath -> IO [String]
processPage pi pageContents targetFolder = do
let imageUrls = (filter (not . ("https:" `isPrefixOf`)) . getImages) pageContents
downloadAndSaveImages targetFolder (piRootUrl pi) (piPageUrl pi) imageUrls
let adaptedPageContents = cleanAndLocalize imageUrls pageContents
savePage targetFolder (piFileName pi) adaptedPageContents
return imageUrls
cleanAndLocalize :: [Url] -> PageContents -> PageContents
cleanAndLocalize imageUrls pageContents =
removeBaseHref . localizeSrcUrls ("../" ++ imagesFolder) imageUrls $ pageContents
prettifyList :: Show a => String -> [a] -> String
prettifyList delim = foldr ((++) . (++) delim . show) ""
handleException exception = do
let exceptionInfo = getExceptionInfo exception
loge (fst exceptionInfo)
return $ Left (snd exceptionInfo)
where
getExceptionInfo exception =
case exception of
TableOfContentsCouldNotBeDownloadedException -> ( "TableOfContentsCouldNotBeDownloadedException."
, "Could not download page. Please check the url and/or make sure that the server is available.")
ex@(KindlegenException code) -> ( show ex
, "The kindlegen tool was unable to convert the page. Please try another format.")
ex -> ( "Unknown Exception: " ++ show ex
, "An unexpected error occurred. Please try again later.")
|
thlorenz/WebToInk
|
webtoink-converter/WebToInk/Converter/ConverterService.hs
|
Haskell
|
bsd-2-clause
| 10,328
|
{-# LANGUAGE OverloadedStrings, DuplicateRecordFields #-}
{- Example "data-point" from a "daily" result:
"time":1475564400,
"summary":"Partly cloudy until evening.",
"icon":"partly-cloudy-day",
"sunriseTime":1475590177,
"sunsetTime":1475632150,
"moonPhase":0.12,
"precipIntensity":0,
"precipIntensityMax":0,
"precipProbability":0,
"temperatureMin":55.22,
"temperatureMinTime":1475647200,
"temperatureMax":68.38,
"temperatureMaxTime":1475622000,
"apparentTemperatureMin":55.22,
"apparentTemperatureMinTime":1475647200,
"apparentTemperatureMax":68.38,
"apparentTemperatureMaxTime":1475622000,
"dewPoint":51.8,
"humidity":0.75,
"windSpeed":6.3,
"windBearing":311,
"visibility":10,
"cloudCover":0.42,
"pressure":1016,
"ozone":290.35
-}
module WeatherPoint (
CurrentWeather(..),
WeatherPoint(..)
) where
import Data.Text (Text)
import Data.Aeson
import Data.ByteString.Lazy
data WeatherPoint = WeatherPoint {
summary :: Text,
icon :: Text,
temperatureMin :: Double, -- C or F
temperatureMax :: Double, -- C or F
humidity :: Double, -- 0-1, percentage
precipProbability :: Double -- 0-1, percentage
} deriving (Eq, Show)
instance FromJSON WeatherPoint where
parseJSON (Object v) =
WeatherPoint
<$> v .: "summary"
<*> v .: "icon"
<*> v .: "temperatureMin"
<*> v .: "temperatureMax"
<*> v .: "humidity"
<*> v .: "precipProbability"
parseJSON _ = mempty
data CurrentWeather = CurrentWeather {
summary :: Text,
icon :: Text,
temperature :: Double, -- C or F
humidity :: Double, -- 0-1, percentage
precipProbability :: Double -- 0-1, percentage
} deriving (Eq, Show)
instance FromJSON CurrentWeather where
parseJSON (Object v) =
CurrentWeather
<$> v .: "summary"
<*> v .: "icon"
<*> v .: "temperature"
<*> v .: "humidity"
<*> v .: "precipProbability"
parseJSON _ = mempty
sample :: ByteString
sample = "{\"time\":1475650800,\"summary\":\"Clear throughout the day.\",\"icon\":\"clear-day\",\"sunriseTime\":1475676631,\"sunsetTime\":1475718459,\"moonPhase\":0.15,\"precipIntensity\":0,\"precipIntensityMax\":0,\"precipProbability\":0,\"temperatureMin\":54.79,\"temperatureMinTime\":1475676000,\"temperatureMax\":70.74,\"temperatureMaxTime\":1475704800,\"apparentTemperatureMin\":54.79,\"apparentTemperatureMinTime\":1475676000,\"apparentTemperatureMax\":70.74,\"apparentTemperatureMaxTime\":1475704800,\"dewPoint\":49.47,\"humidity\":0.68,\"windSpeed\":8.4,\"windBearing\":315,\"visibility\":9.36,\"cloudCover\":0.07,\"pressure\":1017.49,\"ozone\":310.41}"
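-- A decoding sketch (not part of the original module): the daily sample above
-- parses as a WeatherPoint, but not as a CurrentWeather, because it carries
-- "temperatureMin"/"temperatureMax" rather than a "temperature" field.
--
-- decode sample :: Maybe WeatherPoint    -- Just (WeatherPoint ...)
-- decode sample :: Maybe CurrentWeather  -- Nothing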
|
jasonkuhrt/weather
|
source/WeatherPoint.hs
|
Haskell
|
bsd-3-clause
| 2,782
|
{-# LANGUAGE TypeSynonymInstances, FlexibleInstances, FlexibleContexts #-}--, OverlappingInstances #-}
module Language.SPL.Analyzer where
import Language.SPL.Program
import Language.SPL.Position
--import Language.SPL.Environment
type Error = (Position,String)
type Errors = [Error]
--report :: (Show a, MonadWriter Errors m) => a -> String -> m ()
--report p m = tell $ [show p ++ ": " ++ m]
--warn
--error
--inform
ask :: Name -> Type
ask = undefined
info :: (MonadReader Environment m) => Name -> m Info
info n = asks (M.lookup n)
class Analysable a where
-- | Deeply analyse types and usage of object
analyse :: a -> Bool
-- | Checks if Type matches the type of the object
match :: Type -> a -> Bool
instance (Analysable a) => Analysable [a] where --Holds for Blocks and Programs
--
analyse = and . map analyse
-- A Block matches a certain type if all the Return statements evaluate to the same type.
match t = and . map (match t)
instance Analysable Construct where
  -- For a Declaration we only have to check that the given type matches the initialization expression.
analyse (Declaration t n e) = match t e
-- For a Definition we have to check if the return type matches the types of all the Return expressions in the Block
-- After that continue analyzing the Block.
analyse (Definition t n ps cs bs) = match t bs && analyse bs
-- Constructs match any type.
match _ _ = True
instance Analysable Statement where
-- We analyse all the components of a given Statement.
analyse (Return _) = True
analyse (Assign n e) = match (ask n) e
analyse (If c ts es) = match BOOL c && analyse ts && analyse es
analyse (While c ls) = match BOOL c && analyse ls
analyse (Execute n as) = length ps == length as && and (zipWith match ps as) -- Warning if return value not used?
where ps = [ask n] --parameters $ ask n
-- Statement matches are only used to ensure the types of the Return statements.
-- We go on recursively checking the Blocks of If and While statements.
match VOID (Return Nothing) = True
match t (Return (Just e)) = match t e
match t (If _ ts es) = match t ts && match t es
match t (While _ ls) = match t ls
match _ _ = True
instance Analysable Expression where
-- Not implemented, probably not needed.
analyse (Call n as) = length ps == length as && and (zipWith match ps as)
where ps = [ask n]
analyse _ = True
--match (Poly a) _ = True
match (INT) (Integer _) = True
match (BOOL) (Boolean _) = True
match (LIST _) (Nil) = True
match (PAIR t s) (Pair x y) = match t x && match s y
match t (Value n) = t == ask n
match t (Call n as) = t == ask n && analyse (Call n as)
match t (Infix o l r) = match t o && match t l && match t r
match t (Prefix o e) = match t o && match t e
match t _ = False
instance Analysable BinaryOperator where
analyse = undefined
match (INT) o = o `elem` [Add, Sub, Mul, Div, Mod]
match (BOOL) o = o `elem` [Eq, Ne, Lt, Gt, Le, Ge, And, Or]
match (LIST _) o = o `elem` [Cons]
instance Analysable UnaryOperator where
analyse = undefined
match (BOOL) o = o `elem` [Not, Neg]
parameters :: Construct -> [Type]
parameters (Definition _ _ ps _ _) = map extract ps
where extract (Parameter t _) = t
parameters _ = [] -- error!
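-- A hypothetical illustration (not part of the original module): a Return of
-- an integer literal matches INT via the Expression instance, e.g.
--   match INT (Return (Just (Integer 1))) == True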
|
timjs/spl-compiler
|
Old/Analyzer.hs
|
Haskell
|
bsd-3-clause
| 3,466
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Main where
data Binary = Bin Binary Binary | Tip
deriving Show
louds :: Binary -> [Bool]
louds binary = True : encode binary
where
encode :: Binary -> [Bool]
encode (Bin l r) = True : (encode l) ++ (encode r)
encode Tip = [False]
rank :: Eq a => a -> [a] -> Int -> Int
rank _ _ 0 = 0
rank a (x:xs) i | a == x = 1 + (rank a xs (i - 1))
rank a (_:xs) i = 0 + (rank a xs (i - 1))
rank _ [] _ = 0
select :: Eq a => a -> [a] -> Int -> Int
select _ _ 0 = -1
select a (x:xs) i | a == x = 1 + (select a xs (i - 1))
select a (_:xs) i = 1 + (select a xs (i - 0))
select _ [] _ = 0
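-- A small usage sketch (the helper name 'exampleTree' is not part of the
-- original file): the encoding prepends a leading True, then emits one True
-- per internal node and one False per Tip in preorder.
exampleTree :: Binary
exampleTree = Bin (Bin Tip Tip) Tip

-- louds exampleTree == [True,True,True,False,False,False]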
-- Jacobson encoding
main :: IO ()
main = return ()
|
haskell-works/succinct-playground
|
app/Main.hs
|
Haskell
|
bsd-3-clause
| 836
|
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-|
Module : Text.XML.Xleb
Description : The Xleb XML-parsing monad
Copyright : (c) Getty Ritter, 2017
License : BSD
Maintainer : Getty Ritter <xleb@infinitenegativeutility.com>
Stability : experimental
The 'Xleb' monad (and the corresponding 'XlebT' monad transformer) is
a monadic sublanguage for easily parsing XML structures.
This module is intended to be imported qualified, to avoid name
clashes with 'Prelude' functions. e.g.
> import qualified Text.XML.Xleb as X
-}
module Text.XML.Xleb
( -- * How To Use 'Xleb'
-- $use
-- * The 'Xleb' monad
Xleb
, runXleb
-- ** The 'XlebT' monad transformer
, XlebT
, runXlebT
-- * Errors
, XlebError(..)
, errorString
-- * Element Structure
, elem
, attr
, contents
, rawElement
, child
, children
-- * Parsing contained string data
, Parse
, string
, number
, reader
-- * Selecting Elements
, Selector
, byTag
, any
) where
import Prelude hiding (any, elem)
import Control.Applicative (Alternative(..))
import qualified Control.Monad.Fail as M
import qualified Control.Monad.Except as M
import qualified Control.Monad.Reader as M
import qualified GHC.Exts as GHC
import qualified Data.Functor.Identity as M
import qualified Text.XML.Light as XML
-- | The 'XlebT' monad transformer describes a computation used to
-- parse a fragment of XML from a particular element of an XML
-- structure. This may fail with an error, or it may produce a value.
newtype XlebT m a =
Xleb (M.ReaderT XML.Element (M.ExceptT XlebError m) a)
deriving (Functor, Applicative, Monad, Alternative)
-- | The 'Xleb' monad describes a computation used to parse a fragment
-- of XML from a particular element of an XML structure. This may fail
-- with an error, or it may produce a value.
type Xleb a = XlebT M.Identity a
-- | The 'XlebError' type describes the various errors that can occur
-- in the course of parsing an XML structure. If you simply want the
-- human-readable string that corresponds to your error, then use the
-- 'errorString' function.
data XlebError
= XEInElem String XlebError
-- ^ Describes the element context in which an error occurred
| XEInAttr String XlebError
-- ^ Describes the attribute context in which an error occurred
| XEParseFailure String
-- ^ Some parser function was unable to produce a value from the
-- string embedded in an XML element
| XENoSuchAttribute String
-- ^ A 'XlebT' computation required an attribute that wasn't
-- found in the specified element.
| XEUnexpectedElement String String
-- ^ A 'XlebT' computation expected one element but found another
| XENoMatchingElement Selector
-- ^ A 'XlebT' computation used a selector which did not
-- successfully describe any child elements
| XEAmbiguousElement Selector
-- ^ A 'XlebT' computation used a selector as though it would
-- unambiguously name a single child, but instead multiple child
-- elements matched the selector
| XEBadXML
-- ^ The "xml" library was unable to parse the document as XML.
| XOtherError String
-- ^ Another error occurred which was not described by the above
-- constructors
deriving (Eq, Show)
instance Monoid XlebError where
mappend x _ = x
mempty = XOtherError "unknown error"
-- | Convert a 'XlebError' value to the corresponding human-readable
-- string.
errorString :: XlebError -> String
errorString = gatherContext ""
where gatherContext ctx (XEInElem el err) =
gatherContext (ctx ++ el ++ "/") err
gatherContext ctx (XEInAttr at err) =
gatherContext (ctx ++ "[@" ++ at ++ "]") err
gatherContext ctx err =
ctx ++ ": " ++ showError err
showError (XEParseFailure err) = err
showError XEBadXML =
"Unable to parse input string as XML"
showError (XENoSuchAttribute str) =
"No attribute called '" ++ str ++ "'"
showError (XEUnexpectedElement e1 e2) =
"Unexpected element " ++ e1 ++ "; expected " ++ e2
showError (XENoMatchingElement sel) =
"No elements were found maching selector " ++ show sel
showError (XEAmbiguousElement sel) =
"Multiple elements matched the selector " ++ show sel
showError (XOtherError str) = str
showError (XEInElem _ _) = error "[unexpected]"
showError (XEInAttr _ _) = error "[unexpected]"
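-- A hedged illustration (not part of the original module) of how element
-- context is rendered by 'errorString':
--   errorString (XEInElem "feed" (XENoSuchAttribute "title"))
--     == "feed/: No attribute called 'title'"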
instance Monad m => M.MonadFail (XlebT m) where
fail = Xleb . M.throwError . XOtherError
-- | A value of type @'Parse' t@ is a function that can either produce
-- a value of type @t@ or fail with a string message.
type Parse t = String -> Either String t
-- | A 'Selector' represents some criteria by which child elements are
-- matched.
data Selector
= SelByName String
| SelByNS String
| SelBoth Selector Selector
| SelAny
deriving (Eq, Show)
instance Monoid Selector where
mempty = SelAny
mappend = SelBoth
instance GHC.IsString Selector where
fromString = SelByName
toPred :: Selector -> XML.Element -> Bool
toPred SelAny _ = True
toPred (SelByName n) el =
XML.showQName (XML.elName el) == n
toPred (SelByNS n) el =
case XML.qPrefix (XML.elName el) of
Nothing -> False
Just p -> p == n
toPred (SelBoth s1 s2) el =
toPred s1 el && toPred s2 el
-- | Find an attribute on the current focus element and parse it to a
-- value of type @t@. If the parse function fails, then this will fail
-- with 'XEParseFailure'.
attr :: Monad m => String -> Parse t -> XlebT m t
attr name parser = Xleb $ do
el <- M.ask
case XML.findAttr (XML.unqual name) el of
Nothing -> M.throwError (XENoSuchAttribute name)
Just a -> case parser a of
Left err -> M.throwError (XEInAttr name (XEParseFailure err))
Right x -> return x
-- | Take the string content of the current element and parse it to a
-- value of type @t@. If the parse function fails, then this will fail
-- with 'XEParseFailure'.
contents :: Monad m => Parse t -> XlebT m t
contents parser = Xleb $ do
cnt <- XML.strContent `fmap` M.ask
case parser cnt of
Left err -> M.throwError (XEParseFailure err)
Right x -> return x
-- | Access the raw underlying XML element that we are
-- processing. This is sometimes necessary for working with free-form
-- XML data.
rawElement :: Monad m => XlebT m XML.Element
rawElement = Xleb M.ask
-- | Use a 'Selector' that unambiguously identifies a single child
-- element of the current element and then parse it according to a
-- given 'XlebT' computation focused on that element. If no child
-- matches the provided 'Selector', then this will fail with
-- 'XENoMatchingElement'. If multiple children match the provided
-- 'Selector', then this will fail with 'XEAmbiguousElement'.
child :: Monad m => Selector -> XlebT m t -> XlebT m t
child sel (Xleb mote) = Xleb $ do
cld <- XML.filterChildren (toPred sel) `fmap` M.ask
case cld of
[] -> M.throwError (XENoMatchingElement sel)
[x] -> M.local (const x) mote
_ -> M.throwError (XEAmbiguousElement sel)
-- | Use a 'Selector' that identifies some child elements of the
-- current element and parse each according to a given 'XlebT'
-- computation, which will be repeated with focus on each child
-- element, and returning the resulting values as a list. If no child
-- elements match the 'Selector', then this will return an empty list.
children :: Monad m => Selector -> XlebT m t -> XlebT m [t]
children sel (Xleb mote) = Xleb $ do
cld <- XML.filterChildren (toPred sel) `fmap` M.ask
sequence [ M.local (const x) mote | x <- cld ]
-- | A 'Parse' function that parses numeric values according to their
-- Haskell 'Read' instance.
number :: (Read n, Num n) => Parse n
number = Right . read
-- | A 'Parse' function that accepts arbitrary string input without
-- failing.
string :: Parse String
string = Right
-- | A 'Parse' function that parses Haskell values according to their
-- 'Read' instance.
reader :: Read a => Parse a
reader = Right . read
-- | Creates a 'Selector' which expects an exact tag name.
byTag :: String -> Selector
byTag = SelByName
-- | Creates a 'Selector' which expects a specific namespace
byNamespace :: String -> Selector
byNamespace = SelByNS
-- | Creates a 'Selector' which matches any possible child element.
any :: Selector
any = SelAny
-- | @'elem' n t@ will ensure that the currently focused element is a
-- tag named @n@ and will then evaluate it using the computation
-- @t@. This will fail with 'XEUnexpectedElement' if the tag is named
-- something else.
elem :: Monad m => String -> XlebT m t -> XlebT m t
elem name (Xleb mote) = Xleb $ do
el <- M.ask
case el of
XML.Element { XML.elName = qname }
| XML.showQName qname == name -> mote
| otherwise -> M.throwError
(XEUnexpectedElement (XML.showQName qname) name)
doXleb :: XML.Element -> XlebT m t -> m (Either XlebError t)
doXleb el (Xleb mote) =
M.runExceptT (M.runReaderT mote el)
-- | Run a 'Xleb' computation over a string containing XML data,
-- producing either the resulting value or an error. If the XML data
-- contained in the argument string is invalid, then this will fail
-- with 'XEBadXML'.
runXleb :: String -> Xleb t -> Either XlebError t
runXleb raw xleb = case XML.parseXMLDoc raw of
Nothing -> Left XEBadXML
Just x -> M.runIdentity (doXleb x xleb)
-- | Run a 'XlebT' computation over a string containing XML data,
-- producing either the resulting monadic value or an error. If the
-- XML data contained in the argument string is invalid, then this
-- will fail with 'XEBadXML'.
runXlebT :: Monad m => String -> XlebT m t -> m (Either XlebError t)
runXlebT raw xleb = case XML.parseXMLDoc raw of
Nothing -> return (Left XEBadXML)
Just x -> doXleb x xleb
{- $use
The 'Xleb' monad describes both parsing /and/ traversing a given XML
structure: several of the functions to produce 'Xleb' computations
take other 'Xleb' computations, which are run on various sub-parts of
the XML tree. Consequently, instead of decomposing an XML structure
and passing it around to various functions, the 'Xleb' language treats
"the current location in the tree" as an implicit piece of data in the
'Xleb' monad.
You will generally want to identify your root node with the 'elem'
function to ensure that your root node has the tag you
expect. Children of that node can be accessed using the 'child' or
'children' function to either unambiguously find a specific child
element, or to find all child elements that match a given selector and
apply a 'Xleb' computation to each of them.
@
a <- X.child (X.byTag "a") parseA
b <- X.children (X.byTag "b") parseB
@
Leaf data tends to come in two forms in XML: attribute values (like
@\<tag attr="value"\>@) or tag content (like
@\<tag\>value\<\/tag\>@). In both cases, the 'Xleb' functions allow
you to parse that content however you'd like by providing an arbitrary
function of type @'String' -> 'Either' 'String' a@. The "xleb" library
provides several built-in functions of this type for common
situations.
@
c <- X.attr "index" X.number
d <- X.contents X.string
@
Finally, the `Xleb` monad has `Alternative` instances which allow for
concise expression of optional values or multiple possibilities.
@
e \<- X.children X.any (parseA \<|\> parseB)
f \<- optional (X.attr "total" X.number)
@
Consequently, for an XML structure like the following:
@
\<feed\>
\<title\>Feed Name\<\/title\>
\<author\>Pierre Menard\<\/author\>
\<entry title="Entry 01"\>First Post\<\/entry\>
\<entry title="Entry 02"\>Second Post Post\<\/entry\>
\<\/feed\>
@
We can write a 'Xleb' computation which is capable of parsing this
structure in a handful of lines:
@
import Control.Applicative (optional)
import qualified Text.XML.Xleb as X
feed :: X.Xleb (String, Maybe String, [(String, String)])
feed = X.elem "feed" $ do
feedTitle <- X.child (X.byTag "title") $
X.contents X.string
feedAuthor <- optional $ X.child (X.byTag "author") $
X.contents X.string
feedEntries <- X.children (X.byTag "entry") entry
return (feedTitle, feedAuthor, feedEntries)
entry :: X.Xleb (String, String)
entry = (,) \<$\> X.attr "title" X.string \<*\> X.contents X.string
@
-}
|
aisamanra/xleb
|
src/Text/XML/Xleb.hs
|
Haskell
|
bsd-3-clause
| 12,279
|
{-# LANGUAGE DeriveDataTypeable, RecordWildCards, TemplateHaskell, MagicHash #-}
{-# OPTIONS_GHC -fno-warn-missing-fields #-}
module System.Console.CmdArgs.Test.Implicit.Diffy where
import System.Console.CmdArgs
import System.Console.CmdArgs.Quote
import System.Console.CmdArgs.Test.Implicit.Util
data Diffy = Create {src :: Maybe FilePath, out :: FilePath}
| Diff {old :: FilePath, new :: FilePath, out :: FilePath}
deriving (Data,Typeable,Show,Eq)
outFlags x = x &= help "Output file" &= typFile
create = Create
{src = def &= help "Source directory" &= typDir
,out = outFlags "ls.txt"
} &= help "Create a fingerprint"
diff = Diff
{old = def &= typ "OLDFILE" &= argPos 0
,new = def &= typ "NEWFILE" &= argPos 1
,out = outFlags "diff.txt"
} &= help "Perform a diff"
mode = cmdArgsMode $ modes [create,diff] &= help "Create and compare differences" &= program "diffy" &= summary "Diffy v1.0"
$(cmdArgsQuote
[d|
outFlags_ x = x &=# help "Output file" &=# typFile
create_ = Create
{src = Nothing &=# help "Source directory" &=# typDir
,out = outFlags_ "ls.txt"
} &=# help "Create a fingerprint"
diff_ = Diff
{old = "" &=# typ "OLDFILE" &=# argPos 0
,new = "" &=# typ "NEWFILE" &=# argPos 1
,out = outFlags_ "diff.txt"
} &=# help "Perform a diff"
mode_ = cmdArgsMode# $ modes# [create_,diff_] &=# help "Create and compare differences" &=# program "diffy" &=# summary "Diffy v1.0"
|])
-- STOP MANUAL
test = do
let Tester{..} = testers "Diffy" [mode,mode_]
fails []
isHelp ["--help"] ["diffy [COMMAND] ... [OPTIONS]"] -- FIXME: Should know that root is not valid, thus no brackets on [COMMAND]
isHelp ["create","--help"] []
isHelp ["diff","--help"] []
isHelpNot ["--help"] ["diffy"]
isVersion ["--version"] "Diffy v1.0"
isVersion ["--numeric-version"] "1.0"
["create"] === create
fails ["create","file1"]
fails ["create","--quiet"]
fails ["create","--verbose"]
isVerbosity ["create"] Normal
["create","--src","x"] === create{src=Just "x"}
["create","--src","x","--src","y"] === create{src=Just "y"}
fails ["diff","--src","x"]
fails ["create","foo"]
["diff","foo1","foo2"] === diff{old="foo1",new="foo2"}
fails ["diff","foo1"]
fails ["diff","foo1","foo2","foo3"]
completion [] (0,0) [CompleteValue "create",CompleteValue "diff",CompleteValue "--out",CompleteValue "--help",CompleteValue "--version",CompleteValue "--numeric-version"]
completion ["d"] (0,1) [CompleteValue "diff"]
completion ["dd"] (0,2) []
|
ndmitchell/cmdargs
|
System/Console/CmdArgs/Test/Implicit/Diffy.hs
|
Haskell
|
bsd-3-clause
| 2,681
|
-- -----------------------------------------------------------------------------
-- Alex wrapper code.
--
-- This code is in the PUBLIC DOMAIN; you may copy it freely and use
-- it for any purpose whatsoever.
import Control.Applicative (Applicative (..))
import Data.Word (Word8)
#if defined(ALEX_BASIC_BYTESTRING) || defined(ALEX_POSN_BYTESTRING) || defined(ALEX_MONAD_BYTESTRING)
import qualified Data.Char
import qualified Data.ByteString.Lazy as ByteString
import qualified Data.ByteString.Internal as ByteString (w2c)
#elif defined(ALEX_STRICT_BYTESTRING)
import qualified Data.Char
import qualified Data.ByteString as ByteString
import qualified Data.ByteString.Internal as ByteString
import qualified Data.ByteString.Unsafe as ByteString
#else
import qualified Data.Bits
-- | Encode a Haskell Char as a list of Word8 values, in UTF8 format.
utf8Encode :: Char -> [Word8]
utf8Encode = map fromIntegral . go . ord
where
go oc
| oc <= 0x7f = [oc]
| oc <= 0x7ff = [ 0xc0 + (oc `Data.Bits.shiftR` 6)
, 0x80 + oc Data.Bits..&. 0x3f
]
| oc <= 0xffff = [ 0xe0 + (oc `Data.Bits.shiftR` 12)
, 0x80 + ((oc `Data.Bits.shiftR` 6) Data.Bits..&. 0x3f)
, 0x80 + oc Data.Bits..&. 0x3f
]
| otherwise = [ 0xf0 + (oc `Data.Bits.shiftR` 18)
, 0x80 + ((oc `Data.Bits.shiftR` 12) Data.Bits..&. 0x3f)
, 0x80 + ((oc `Data.Bits.shiftR` 6) Data.Bits..&. 0x3f)
, 0x80 + oc Data.Bits..&. 0x3f
]
#endif
type Byte = Word8
-- -----------------------------------------------------------------------------
-- The input type
#if defined(ALEX_POSN) || defined(ALEX_MONAD) || defined(ALEX_GSCAN)
type AlexInput = (AlexPosn, -- current position,
Char, -- previous char
[Byte], -- pending bytes on current char
String) -- current input string
ignorePendingBytes :: AlexInput -> AlexInput
ignorePendingBytes (p,c,ps,s) = (p,c,[],s)
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (p,c,bs,s) = c
alexGetByte :: AlexInput -> Maybe (Byte,AlexInput)
alexGetByte (p,c,(b:bs),s) = Just (b,(p,c,bs,s))
alexGetByte (p,c,[],[]) = Nothing
alexGetByte (p,_,[],(c:s)) = let p' = alexMove p c
(b:bs) = utf8Encode c
in p' `seq` Just (b, (p', c, bs, s))
#endif
#if defined(ALEX_POSN_BYTESTRING) || defined(ALEX_MONAD_BYTESTRING)
type AlexInput = (AlexPosn, -- current position,
Char, -- previous char
ByteString.ByteString) -- current input string
ignorePendingBytes :: AlexInput -> AlexInput
ignorePendingBytes i = i -- no pending bytes when lexing bytestrings
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (p,c,s) = c
alexGetByte :: AlexInput -> Maybe (Byte,AlexInput)
alexGetByte (p,_,cs) | ByteString.null cs = Nothing
| otherwise = let b = ByteString.head cs
cs' = ByteString.tail cs
c = ByteString.w2c b
p' = alexMove p c
in p' `seq` cs' `seq` Just (b, (p', c, cs'))
#endif
#ifdef ALEX_BASIC_BYTESTRING
type AlexInput = (Char,
ByteString.ByteString)
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (c,_) = c
alexGetByte (_, cs)
| ByteString.null cs = Nothing
| otherwise = Just (ByteString.head cs,
(ByteString.w2c $ ByteString.head cs,
ByteString.tail cs))
#endif
#ifdef ALEX_STRICT_BYTESTRING
data AlexInput = AlexInput { alexChar :: {-# UNPACK #-}!Char
, alexStr :: {-# UNPACK #-}!ByteString.ByteString }
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar = alexChar
alexGetByte (AlexInput _ cs)
| ByteString.null cs = Nothing
| otherwise = Just $! (ByteString.head cs, AlexInput c cs')
where
(c,cs') = (ByteString.w2c (ByteString.unsafeHead cs)
, ByteString.unsafeTail cs)
#endif
-- -----------------------------------------------------------------------------
-- Token positions
-- `Posn' records the location of a token in the input text. It has three
-- fields: the address (number of characters preceding the token), line number
-- and column of a token within the file. `start_pos' gives the position of the
-- start of the file and `eof_pos' a standard encoding for the end of file.
-- `move_pos' calculates the new position after traversing a given character,
-- assuming the usual eight character tab stops.
#if defined(ALEX_POSN) || defined(ALEX_MONAD) || defined(ALEX_POSN_BYTESTRING) || defined(ALEX_MONAD_BYTESTRING) || defined(ALEX_GSCAN)
data AlexPosn = AlexPn !Int !Int !Int
deriving (Eq,Show)
alexStartPos :: AlexPosn
alexStartPos = AlexPn 0 1 1
alexMove :: AlexPosn -> Char -> AlexPosn
alexMove (AlexPn a l c) '\t' = AlexPn (a+1) l (((c+7) `div` 8)*8+1)
alexMove (AlexPn a l c) '\n' = AlexPn (a+1) (l+1) 1
alexMove (AlexPn a l c) _ = AlexPn (a+1) l (c+1)
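-- A worked illustration (not part of the original template): starting at
-- line 1, column 1, a tab advances the column to the next multiple of 8
-- plus 1, and a newline resets it:
--   alexMove (AlexPn 0 1 1) '\t' == AlexPn 1 1 9
--   alexMove (AlexPn 0 1 1) '\n' == AlexPn 1 2 1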
#endif
-- -----------------------------------------------------------------------------
-- Default monad
#ifdef ALEX_MONAD
data AlexState = AlexState {
alex_pos :: !AlexPosn, -- position at current input location
alex_inp :: String, -- the current input
alex_chr :: !Char, -- the character before the input
alex_bytes :: [Byte],
alex_scd :: !Int -- the current startcode
#ifdef ALEX_MONAD_USER_STATE
, alex_ust :: AlexUserState -- AlexUserState will be defined in the user program
#endif
}
-- Compile with -funbox-strict-fields for best results!
runAlex :: String -> Alex a -> Either String a
runAlex input (Alex f)
= case f (AlexState {alex_pos = alexStartPos,
alex_inp = input,
alex_chr = '\n',
alex_bytes = [],
#ifdef ALEX_MONAD_USER_STATE
alex_ust = alexInitUserState,
#endif
alex_scd = 0}) of Left msg -> Left msg
Right ( _, a ) -> Right a
newtype Alex a = Alex { unAlex :: AlexState -> Either String (AlexState, a) }
instance Functor Alex where
fmap f a = Alex $ \s -> case unAlex a s of
Left msg -> Left msg
Right (s', a') -> Right (s', f a')
instance Applicative Alex where
pure a = Alex $ \s -> Right (s, a)
fa <*> a = Alex $ \s -> case unAlex fa s of
Left msg -> Left msg
Right (s', f) -> case unAlex a s' of
Left msg -> Left msg
Right (s'', b) -> Right (s'', f b)
instance Monad Alex where
m >>= k = Alex $ \s -> case unAlex m s of
Left msg -> Left msg
Right (s',a) -> unAlex (k a) s'
return a = Alex $ \s -> Right (s,a)
alexGetInput :: Alex AlexInput
alexGetInput
= Alex $ \s@AlexState{alex_pos=pos,alex_chr=c,alex_bytes=bs,alex_inp=inp} ->
Right (s, (pos,c,bs,inp))
alexSetInput :: AlexInput -> Alex ()
alexSetInput (pos,c,bs,inp)
= Alex $ \s -> case s{alex_pos=pos,alex_chr=c,alex_bytes=bs,alex_inp=inp} of
s@(AlexState{}) -> Right (s, ())
alexError :: String -> Alex a
alexError message = Alex $ \s -> Left message
alexGetStartCode :: Alex Int
alexGetStartCode = Alex $ \s@AlexState{alex_scd=sc} -> Right (s, sc)
alexSetStartCode :: Int -> Alex ()
alexSetStartCode sc = Alex $ \s -> Right (s{alex_scd=sc}, ())
#ifdef ALEX_MONAD_USER_STATE
alexGetUserState :: Alex AlexUserState
alexGetUserState = Alex $ \s@AlexState{alex_ust=ust} -> Right (s,ust)
alexSetUserState :: AlexUserState -> Alex ()
alexSetUserState ss = Alex $ \s -> Right (s{alex_ust=ss}, ())
#endif
alexMonadScan = do
inp <- alexGetInput
sc <- alexGetStartCode
case alexScan inp sc of
AlexEOF -> alexEOF
AlexError ((AlexPn _ line column),_,_,_) -> alexError $ "lexical error at line " ++ (show line) ++ ", column " ++ (show column)
AlexSkip inp' len -> do
alexSetInput inp'
alexMonadScan
AlexToken inp' len action -> do
alexSetInput inp'
action (ignorePendingBytes inp) len
-- -----------------------------------------------------------------------------
-- Useful token actions
type AlexAction result = AlexInput -> Int -> Alex result
-- just ignore this token and scan another one
-- skip :: AlexAction result
skip input len = alexMonadScan
-- ignore this token, but set the start code to a new value
-- begin :: Int -> AlexAction result
begin code input len = do alexSetStartCode code; alexMonadScan
-- perform an action for this token, and set the start code to a new value
andBegin :: AlexAction result -> Int -> AlexAction result
(action `andBegin` code) input len = do alexSetStartCode code; action input len
token :: (AlexInput -> Int -> token) -> AlexAction token
token t input len = return (t input len)
#endif /* ALEX_MONAD */
-- -----------------------------------------------------------------------------
-- Monad (with ByteString input)
#ifdef ALEX_MONAD_BYTESTRING
data AlexState = AlexState {
alex_pos :: !AlexPosn, -- position at current input location
alex_inp :: ByteString.ByteString, -- the current input
alex_chr :: !Char, -- the character before the input
alex_scd :: !Int -- the current startcode
#ifdef ALEX_MONAD_USER_STATE
, alex_ust :: AlexUserState -- AlexUserState will be defined in the user program
#endif
}
-- Compile with -funbox-strict-fields for best results!
runAlex :: ByteString.ByteString -> Alex a -> Either String a
runAlex input (Alex f)
= case f (AlexState {alex_pos = alexStartPos,
alex_inp = input,
alex_chr = '\n',
#ifdef ALEX_MONAD_USER_STATE
alex_ust = alexInitUserState,
#endif
alex_scd = 0}) of Left msg -> Left msg
Right ( _, a ) -> Right a
newtype Alex a = Alex { unAlex :: AlexState -> Either String (AlexState, a) }
instance Monad Alex where
m >>= k = Alex $ \s -> case unAlex m s of
Left msg -> Left msg
Right (s',a) -> unAlex (k a) s'
return a = Alex $ \s -> Right (s,a)
alexGetInput :: Alex AlexInput
alexGetInput
= Alex $ \s@AlexState{alex_pos=pos,alex_chr=c,alex_inp=inp} ->
Right (s, (pos,c,inp))
alexSetInput :: AlexInput -> Alex ()
alexSetInput (pos,c,inp)
= Alex $ \s -> case s{alex_pos=pos,alex_chr=c,alex_inp=inp} of
s@(AlexState{}) -> Right (s, ())
alexError :: String -> Alex a
alexError message = Alex $ \s -> Left message
alexGetStartCode :: Alex Int
alexGetStartCode = Alex $ \s@AlexState{alex_scd=sc} -> Right (s, sc)
alexSetStartCode :: Int -> Alex ()
alexSetStartCode sc = Alex $ \s -> Right (s{alex_scd=sc}, ())
alexMonadScan = do
inp@(_,_,str) <- alexGetInput
sc <- alexGetStartCode
case alexScan inp sc of
AlexEOF -> alexEOF
AlexError ((AlexPn _ line column),_,_) -> alexError $ "lexical error at line " ++ (show line) ++ ", column " ++ (show column)
AlexSkip inp' len -> do
alexSetInput inp'
alexMonadScan
AlexToken inp'@(_,_,str') len action -> do
alexSetInput inp'
action (ignorePendingBytes inp) len
where
len = ByteString.length str - ByteString.length str'
-- -----------------------------------------------------------------------------
-- Useful token actions
type AlexAction result = AlexInput -> Int -> Alex result
-- just ignore this token and scan another one
-- skip :: AlexAction result
skip input len = alexMonadScan
-- ignore this token, but set the start code to a new value
-- begin :: Int -> AlexAction result
begin code input len = do alexSetStartCode code; alexMonadScan
-- perform an action for this token, and set the start code to a new value
andBegin :: AlexAction result -> Int -> AlexAction result
(action `andBegin` code) input len = do alexSetStartCode code; action input len
token :: (AlexInput -> Int -> token) -> AlexAction token
token t input len = return (t input len)
#endif /* ALEX_MONAD_BYTESTRING */
-- -----------------------------------------------------------------------------
-- Basic wrapper
#ifdef ALEX_BASIC
type AlexInput = (Char,[Byte],String)
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (c,_,_) = c
-- alexScanTokens :: String -> [token]
alexScanTokens str = go ('\n',[],str)
where go inp@(_,_bs,s) =
case alexScan inp 0 of
AlexEOF -> []
AlexError _ -> error "lexical error"
AlexSkip inp' len -> go inp'
AlexToken inp' len act -> act (take len s) : go inp'
alexGetByte :: AlexInput -> Maybe (Byte,AlexInput)
alexGetByte (c,(b:bs),s) = Just (b,(c,bs,s))
alexGetByte (c,[],[]) = Nothing
alexGetByte (_,[],(c:s)) = case utf8Encode c of
(b:bs) -> Just (b, (c, bs, s))
[] -> Nothing
#endif
-- -----------------------------------------------------------------------------
-- Basic wrapper, ByteString version
#ifdef ALEX_BASIC_BYTESTRING
-- alexScanTokens :: String -> [token]
alexScanTokens str = go ('\n',str)
where go inp@(_,str) =
case alexScan inp 0 of
AlexEOF -> []
AlexError _ -> error "lexical error"
AlexSkip inp' len -> go inp'
AlexToken inp'@(_,str') _ act -> act (ByteString.take len str) : go inp'
where len = ByteString.length str - ByteString.length str'
#endif
#ifdef ALEX_STRICT_BYTESTRING
-- alexScanTokens :: String -> [token]
alexScanTokens str = go (AlexInput '\n' str)
where go inp@(AlexInput _ str) =
case alexScan inp 0 of
AlexEOF -> []
AlexError _ -> error "lexical error"
AlexSkip inp' len -> go inp'
AlexToken inp'@(AlexInput _ str') _ act -> act (ByteString.unsafeTake len str) : go inp'
where len = ByteString.length str - ByteString.length str'
#endif
-- -----------------------------------------------------------------------------
-- Posn wrapper
-- Adds text positions to the basic model.
#ifdef ALEX_POSN
--alexScanTokens :: String -> [token]
alexScanTokens str = go (alexStartPos,'\n',[],str)
where go inp@(pos,_,_,str) =
case alexScan inp 0 of
AlexEOF -> []
AlexError ((AlexPn _ line column),_,_,_) -> error $ "lexical error at line " ++ (show line) ++ ", column " ++ (show column)
AlexSkip inp' len -> go inp'
AlexToken inp' len act -> act pos (take len str) : go inp'
#endif
-- -----------------------------------------------------------------------------
-- Posn wrapper, ByteString version
#ifdef ALEX_POSN_BYTESTRING
--alexScanTokens :: ByteString -> [token]
alexScanTokens str = go (alexStartPos,'\n',str)
where go inp@(pos,_,str) =
case alexScan inp 0 of
AlexEOF -> []
AlexError ((AlexPn _ line column),_,_) -> error $ "lexical error at line " ++ (show line) ++ ", column " ++ (show column)
AlexSkip inp' len -> go inp'
AlexToken inp' len act -> act pos (ByteString.take (fromIntegral len) str) : go inp'
#endif
-- -----------------------------------------------------------------------------
-- GScan wrapper
-- For compatibility with previous versions of Alex, and because we can.
#ifdef ALEX_GSCAN
alexGScan stop state inp = alex_gscan stop alexStartPos '\n' [] inp (0,state)
alex_gscan stop p c bs inp (sc,state) =
case alexScan (p,c,bs,inp) sc of
AlexEOF -> stop p c inp (sc,state)
AlexError _ -> stop p c inp (sc,state)
AlexSkip (p',c',bs',inp') len -> alex_gscan stop p' c' bs' inp' (sc,state)
AlexToken (p',c',bs',inp') len k ->
k p c inp len (\scs -> alex_gscan stop p' c' bs' inp' scs)
(sc,state)
#endif
|
kumasento/alex
|
templates/wrappers.hs
|
Haskell
|
bsd-3-clause
| 16,536
|
{-# LANGUAGE
DeriveDataTypeable
, DeriveGeneric
, LambdaCase
, OverloadedStrings
, ScopedTypeVariables
, TemplateHaskell
, TypeFamilies
#-}
module Api.Test where
import Control.Monad.Reader
import Control.Monad.Trans.Error
import Data.Aeson
import Data.Data
import Data.JSON.Schema
import Data.Text (Text)
import GHC.Generics
import Generics.Generic.Aeson
import Generics.Regular
import Generics.Regular.XmlPickler
import Text.XML.HXT.Arrow.Pickle
import Rest
import qualified Rest.Resource as R
import ApiTypes
import qualified Api.Test.Err2 as E2
-- | Customer extends the root of the API with a reader containing the ways to identify a customer in our URLs.
-- Currently only by the customer name.
type WithText = ReaderT Text BlogApi
data Err = Err deriving (Generic, Show, Typeable)
deriveAll ''Err "PFErr"
type instance PF Err = PFErr
instance ToJSON Err where toJSON = gtoJson
instance FromJSON Err where parseJSON = gparseJson
instance JSONSchema Err where schema = gSchema
instance XmlPickler Err where xpickle = gxpickle
instance ToResponseCode Err where
toResponseCode _ = 400
data Ok = Ok deriving (Generic, Show, Typeable)
deriveAll ''Ok "PFOk"
type instance PF Ok = PFOk
instance XmlPickler Ok where xpickle = gxpickle
instance ToJSON Ok where toJSON = gtoJson
instance FromJSON Ok where parseJSON = gparseJson
instance JSONSchema Ok where schema = gSchema
resource :: Resource BlogApi WithText Text Void Void
resource = mkResourceReader
{ R.name = "test"
, R.actions = [ ("noResponse" , noResponse )
, ("onlyError" , onlyError )
, ("differentFormats" , differentFormats )
, ("intersectedFormats" , intersectedFormats )
, ("intersectedFormats2", intersectedFormats2)
, ("errorImport" , errorImport )
, ("noError" , noError )
, ("justStringO" , justStringO )
, ("preferJson" , preferJson )
, ("octetStreamOut" , octetStreamOut )
, ("onlyInput" , onlyInput )
]
}
noResponse :: Handler WithText
noResponse = mkConstHandler id $ return ()
onlyError :: Handler WithText
onlyError = mkConstHandler (jsonE . someE) $
throwError $ domainReason Err
differentFormats :: Handler WithText
differentFormats = mkInputHandler (jsonE . someE . xmlO . someO . stringI . someI) $
\case
"error" -> throwError $ domainReason Err
_ -> return Ok
intersectedFormats :: Handler WithText
intersectedFormats = mkInputHandler (jsonE . someE . xmlO . jsonO . someO . stringI . someI) $
\case
"error" -> throwError $ domainReason Err
_ -> return Ok
intersectedFormats2 :: Handler WithText
intersectedFormats2 = mkInputHandler (xmlE . someE . xmlO . jsonO . someO . stringI . someI) $
\case
"error" -> throwError $ domainReason Err
_ -> return Ok
errorImport :: Handler WithText
errorImport = mkIdHandler (stringI . rawXmlO . xmlE . someE) $ \s (_::Text) ->
case s of
"error" -> throwError $ domainReason E2.Err
_ -> return "<ok/>"
noError :: Handler WithText
noError = mkConstHandler (jsonO . someO) $ return Ok
justStringO :: Handler WithText
justStringO = mkConstHandler (stringO . someO) $ return "Ok"
preferJson :: Handler WithText
preferJson = mkInputHandler (xmlJsonO . xmlJsonE . stringI . someI) $
\case
"error" -> throwError $ domainReason Err
_ -> return Ok
octetStreamOut :: Handler WithText
octetStreamOut = mkInputHandler (fileI . fileO . xmlJsonE) $
\case
"error" -> throwError $ domainReason Err
_ -> return ("ok", "ok")
onlyInput :: Handler WithText
onlyInput = mkInputHandler (jsonI . someI) $ \() -> throwError NotFound
|
tinkerthaler/basic-invoice-rest
|
example-api/Api/Test.hs
|
Haskell
|
bsd-3-clause
| 3,901
|
{-# LANGUAGE DeriveDataTypeable #-}
module ApiAnnotation (
getAnnotation, getAndRemoveAnnotation,
getAnnotationComments,getAndRemoveAnnotationComments,
ApiAnns,
ApiAnnKey,
AnnKeywordId(..),
AnnotationComment(..),
LRdrName -- Exists for haddocks only
) where
import RdrName
import Outputable
import SrcLoc
import qualified Data.Map as Map
import Data.Data
{- Note [Api annotations]
~~~~~~~~~~~~~~~~~~~~~~
In order to do source to source conversions using the GHC API, the
locations of all elements of the original source needs to be tracked.
This includes keywords such as 'let' / 'in' / 'do' etc as well as
punctuation such as commas and braces, and also comments.
These are captured in a structure separate from the parse tree, and
returned in the pm_annotations field of the ParsedModule type.
The non-comment annotations are stored indexed to the SrcSpan of the
AST element containing them, together with an AnnKeywordId value
identifying the specific keyword being captured.
> type ApiAnnKey = (SrcSpan,AnnKeywordId)
>
> Map.Map ApiAnnKey SrcSpan
So
> let x = 1 in 2 * x
would result in the AST element
L span (HsLet (binds for x = 1) (2 * x))
and the annotations
(span,AnnLet) having the location of the 'let' keyword
(span,AnnIn) having the location of the 'in' keyword
The comments are indexed to the SrcSpan of the lowest AST element
enclosing them
> Map.Map SrcSpan [Located AnnotationComment]
So the full ApiAnns type is
> type ApiAnns = ( Map.Map ApiAnnKey SrcSpan
> , Map.Map SrcSpan [Located AnnotationComment])
This is done in the lexer / parser as follows.
The PState variable in the lexer has the following variables added
> annotations :: [(ApiAnnKey,SrcSpan)],
> comment_q :: [Located Token],
> annotations_comments :: [(SrcSpan,[Located AnnotationComment])]
The first and last store the values that end up in the ApiAnns value
at the end via Map.fromList
The comment_q captures comments as they are seen in the token stream,
so that when they are ready to be allocated via the parser they are
available.
The parser interacts with the lexer using the function
> addAnnotation :: SrcSpan -> AnnKeywordId -> SrcSpan -> P ()
which takes the AST element SrcSpan, the annotation keyword and the
target SrcSpan.
This adds the annotation to the `annotations` field of `PState` and
transfers any comments in `comment_q` to the `annotations_comments`
field.
Parser
------
The parser implements a number of helper types and methods for the
capture of annotations
> type AddAnn = (SrcSpan -> P ())
>
> mj :: AnnKeywordId -> Located e -> (SrcSpan -> P ())
> mj a l = (\s -> addAnnotation s a (gl l))
AddAnn represents the addition of an annotation a to a provided
SrcSpan, and `mj` constructs an AddAnn value.
> ams :: Located a -> [AddAnn] -> P (Located a)
> ams a@(L l _) bs = (mapM_ (\a -> a l) bs) >> return a
So the production in Parser.y for the HsLet AST element is
| 'let' binds 'in' exp {% ams (sLL $1 $> $ HsLet (snd $ unLoc $2) $4)
(mj AnnLet $1:mj AnnIn $3
:(fst $ unLoc $2)) }
This adds an AnnLet annotation for 'let', an AnnIn for 'in', as well
as any annotations that may arise in the binds. This will include open
and closing braces if they are used to delimit the let expressions.
-}
-- ---------------------------------------------------------------------
type ApiAnns = ( Map.Map ApiAnnKey [SrcSpan]
, Map.Map SrcSpan [Located AnnotationComment])
type ApiAnnKey = (SrcSpan,AnnKeywordId)
-- | Retrieve a list of annotation 'SrcSpan's based on the 'SrcSpan'
-- of the annotated AST element, and the known type of the annotation.
getAnnotation :: ApiAnns -> SrcSpan -> AnnKeywordId -> [SrcSpan]
getAnnotation (anns,_) span ann
= case Map.lookup (span,ann) anns of
Nothing -> []
Just ss -> ss
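-- A hypothetical usage sketch (not part of the original module): given the
-- ApiAnns delivered in 'pm_annotations', find where the 'let' keyword of a
-- particular binding was written.  The helper name is an assumption.
letKeywordSpans :: ApiAnns -> SrcSpan -> [SrcSpan]
letKeywordSpans anns bindSpan = getAnnotation anns bindSpan AnnLet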
-- | Retrieve a list of annotation 'SrcSpan's based on the 'SrcSpan'
-- of the annotated AST element, and the known type of the annotation.
-- The list is removed from the annotations.
getAndRemoveAnnotation :: ApiAnns -> SrcSpan -> AnnKeywordId
-> ([SrcSpan],ApiAnns)
getAndRemoveAnnotation (anns,cs) span ann
= case Map.lookup (span,ann) anns of
Nothing -> ([],(anns,cs))
Just ss -> (ss,(Map.delete (span,ann) anns,cs))
-- |Retrieve the comments allocated to the current 'SrcSpan'
--
-- Note: A given 'SrcSpan' may appear in multiple AST elements,
-- beware of duplicates
getAnnotationComments :: ApiAnns -> SrcSpan -> [Located AnnotationComment]
getAnnotationComments (_,anns) span =
case Map.lookup span anns of
Just cs -> cs
Nothing -> []
-- |Retrieve the comments allocated to the current 'SrcSpan', and
-- remove them from the annotations
getAndRemoveAnnotationComments :: ApiAnns -> SrcSpan
-> ([Located AnnotationComment],ApiAnns)
getAndRemoveAnnotationComments (anns,canns) span =
case Map.lookup span canns of
Just cs -> (cs,(anns,Map.delete span canns))
Nothing -> ([],(anns,canns))
-- --------------------------------------------------------------------
-- | API Annotations exist so that tools can perform source to source
-- conversions of Haskell code. They are used to keep track of the
-- various syntactic keywords that are not captured in the existing
-- AST.
--
-- The annotations, together with original source comments are made
-- available in the @'pm_annotations'@ field of @'GHC.ParsedModule'@.
-- Comments are only retained if @'Opt_KeepRawTokenStream'@ is set in
-- @'DynFlags.DynFlags'@ before parsing.
--
-- Note: in general the names of these are taken from the
-- corresponding token, unless otherwise noted
-- See note [Api annotations] above for details of the usage
data AnnKeywordId
= AnnAs
| AnnAt
| AnnBang -- ^ '!'
| AnnBackquote -- ^ '`'
| AnnBy
| AnnCase -- ^ case or lambda case
| AnnClass
| AnnClose -- ^ '\#)' or '\#-}' etc
| AnnCloseC -- ^ '}'
| AnnCloseP -- ^ ')'
| AnnCloseS -- ^ ']'
| AnnColon
| AnnComma -- ^ as a list separator
| AnnCommaTuple -- ^ in a RdrName for a tuple
| AnnDarrow -- ^ '=>'
| AnnData
| AnnDcolon -- ^ '::'
| AnnDefault
| AnnDeriving
| AnnDo
| AnnDot -- ^ '.'
| AnnDotdot -- ^ '..'
| AnnElse
| AnnEqual
| AnnExport
| AnnFamily
| AnnForall
| AnnForeign
| AnnFunId -- ^ for function name in matches where there are
-- multiple equations for the function.
| AnnGroup
| AnnHeader -- ^ for CType
| AnnHiding
| AnnIf
| AnnImport
| AnnIn
| AnnInfix -- ^ 'infix' or 'infixl' or 'infixr'
| AnnInstance
| AnnLam
| AnnLarrow -- ^ '<-'
| AnnLet
| AnnMdo
| AnnMinus -- ^ '-'
| AnnModule
| AnnNewtype
| AnnOf
| AnnOpen -- ^ '(\#' or '{-\# LANGUAGE' etc
| AnnOpenC -- ^ '{'
| AnnOpenP -- ^ '('
| AnnOpenS -- ^ '['
| AnnPackageName
| AnnPattern
| AnnProc
| AnnQualified
| AnnRarrow -- ^ '->'
| AnnRec
| AnnRole
| AnnSafe
| AnnSemi -- ^ ';'
| AnnStatic -- ^ 'static'
| AnnThen
| AnnTilde -- ^ '~'
| AnnTildehsh -- ^ '~#'
| AnnType
| AnnUnit -- ^ '()' for types
| AnnUsing
| AnnVal -- ^ e.g. INTEGER
| AnnValStr -- ^ String value, will need quotes when output
| AnnVbar -- ^ '|'
| AnnWhere
| Annlarrowtail -- ^ '-<'
| Annrarrowtail -- ^ '->'
| AnnLarrowtail -- ^ '-<<'
| AnnRarrowtail -- ^ '>>-'
| AnnEofPos
deriving (Eq,Ord,Data,Typeable,Show)
instance Outputable AnnKeywordId where
ppr x = text (show x)
-- ---------------------------------------------------------------------
data AnnotationComment =
-- Documentation annotations
AnnDocCommentNext String -- ^ something beginning '-- |'
| AnnDocCommentPrev String -- ^ something beginning '-- ^'
| AnnDocCommentNamed String -- ^ something beginning '-- $'
| AnnDocSection Int String -- ^ a section heading
| AnnDocOptions String -- ^ doc options (prune, ignore-exports, etc)
| AnnDocOptionsOld String -- ^ doc options declared "-- # ..."-style
| AnnLineComment String -- ^ comment starting by "--"
| AnnBlockComment String -- ^ comment in {- -}
deriving (Eq,Ord,Data,Typeable,Show)
-- Note: these are based on the Token versions, but the Token type is
-- defined in Lexer.x and bringing it in here would create a loop
instance Outputable AnnotationComment where
ppr x = text (show x)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnClose','ApiAnnotation.AnnComma',
-- 'ApiAnnotation.AnnRarrow','ApiAnnotation.AnnTildehsh',
-- 'ApiAnnotation.AnnTilde'
-- - May have 'ApiAnnotation.AnnComma' when in a list
type LRdrName = Located RdrName
|
green-haskell/ghc
|
compiler/parser/ApiAnnotation.hs
|
Haskell
|
bsd-3-clause
| 8,957
|
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE Rank2Types #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE ViewPatterns #-}
-----------------------------------------------------------------------------
-- |
-- Module : Geometry.Combinators
-- Copyright : (c) 2011-2017 diagrams team (see LICENSE)
-- License : BSD-style (see LICENSE)
-- Maintainer : diagrams-discuss@googlegroups.com
--
-- Higher-level tools for combining geometric objects.
--
-----------------------------------------------------------------------------
module Geometry.Combinators
(
-- * Binary operations
beside
, atDirection
-- * n-ary operations
, appends
, position, atPoints
, cat
, sep
, sepEven
-- , composeAligned
-- * Alignment
, align
, alignBy
, alignBy'
-- * Snugging
, snug
, snugBy
-- * Centering
, center
, centerV
, snugCenter
, snugCenterV
) where
import Control.Lens ((&))
import Control.Lens.Cons
import Data.Foldable (foldl')
import Data.Maybe (fromMaybe)
import Data.Monoid.WithSemigroup
import qualified Data.Semigroup as Sem
import Geometry.Direction
import Geometry.Envelope
import Geometry.Juxtapose
import Geometry.Space
import Geometry.Trace
import Geometry.Transform
import Linear.Affine
import Linear.Metric
import Linear.V2
import Linear.Vector
------------------------------------------------------------
-- Combining two objects
------------------------------------------------------------
-- | Place two monoidal objects (/i.e./ diagrams, paths,
-- animations...) next to each other along the given vector. In
-- particular, place the second object so that the vector points
-- from the local origin of the first object to the local origin of
-- the second object, at a distance so that their envelopes are just
-- tangent. The local origin of the new, combined object is the
-- local origin of the first object (unless the first object is the
-- identity element, in which case the second object is returned
-- unchanged).
--
-- <<diagrams/src_Geometry_Combinators_besideEx.svg#diagram=besideEx&height=200>>
--
-- > besideEx = beside (r2 (20,30))
-- > (circle 1 # fc orange)
-- > (circle 1.5 # fc purple)
-- > # showOrigin
-- > # centerXY # pad 1.1
--
-- Note that @beside v@ is associative, so objects under @beside v@
-- form a semigroup for any given vector @v@. In fact, they also
-- form a monoid: 'mempty' is clearly a right identity (@beside v d1
-- mempty === d1@), and there should also be a special case to make
-- it a left identity, as described above.
--
-- In older versions of the @diagrams@ library, @beside@ put the
-- local origin of the result at the point of tangency between the
-- two inputs. That semantics can easily be recovered by performing
-- an alignment on the first input before combining. That is, if
-- @beside'@ denotes the old semantics,
--
-- > beside' v x1 x2 = beside v (x1 # align v) x2
--
-- To get something like @beside v x1 x2@ whose local origin is
-- identified with that of @x2@ instead of @x1@, use @beside
-- (negateV v) x2 x1@.
beside :: (Juxtaposable a, Sem.Semigroup a) => Vn a -> a -> a -> a
beside v d1 d2 = d1 Sem.<> juxtapose v d1 d2
-- | Place two juxtaposable objects adjacent to one another, with the
-- second placed in the direction 'd' from the first. The local
-- origin of the resulting combined object is the same as the local
-- origin of the first. See the documentation of 'beside' for more
-- information.
atDirection
:: (Juxtaposable a, Sem.Semigroup a)
=> Direction (V a) (N a) -> a -> a -> a
atDirection = beside . fromDirection
------------------------------------------------------------
-- Combining multiple objects
------------------------------------------------------------
-- | @appends x ys@ appends each of the objects in @ys@ to the object
-- @x@ in the corresponding direction. Note that each object in
-- @ys@ is positioned beside @x@ /without/ reference to the other
-- objects in @ys@, so this is not the same as iterating 'beside'.
--
-- <<diagrams/src_Geometry_Combinators_appendsEx.svg#diagram=appendsEx&width=200>>
--
-- > appendsEx = appends c (zip (iterateN 6 (rotateBy (1/6)) unitX) (repeat c))
-- > # centerXY # pad 1.1
-- > where c = circle 1
appends :: (Metric (V a), Floating (N a), Juxtaposable a, Monoid' a) => a -> [(Vn a,a)] -> a
appends d1 apps = d1 Sem.<> mconcat (map (\(v,d) -> juxtapose (signorm v) d1 d) apps)
-- | Position things absolutely: combine a list of objects
-- (e.g. diagrams or paths) by assigning them absolute positions in
-- the vector space of the combined object.
--
-- <<diagrams/src_Geometry_Combinators_positionEx.svg#diagram=positionEx&height=300>>
--
-- > positionEx = position (zip (map mkPoint [-3, -2.8 .. 3]) (repeat spot))
-- > where spot = circle 0.2 # fc black
-- > mkPoint :: Double -> P2 Double
-- > mkPoint x = p2 (x,x*x)
position :: (InSpace v n a, HasOrigin a, Monoid a) => [(Point v n, a)] -> a
position = mconcat . map (uncurry moveTo)
-- | Curried version of @position@, takes a list of points and a list of
-- objects.
atPoints :: (InSpace v n a, HasOrigin a, Monoid' a) => [Point v n] -> [a] -> a
atPoints ps as = position $ zip ps as
-- | @cat v@ positions a list of objects so that their local origins
-- lie along a line in the direction of @v@. Successive objects
-- will have their envelopes just touching. The local origin
-- of the result will be the same as the local origin of the first
-- object.
--
-- See also 'sep', which takes a distance parameter allowing
-- certain aspects of the operation to be tweaked.
--
-- See also 'Geometry.TwoD.Combinators.hcat' and
-- 'Geometry.TwoD.Combinators.vcat'
cat
:: (InSpace v n a, Enveloped a, Monoid a, HasOrigin a)
=> v n -> [a] -> a
cat v = sep v 0
-- | Similar to 'cat' but with a gap parameter which is used as the
-- distance between successive objects.
--
-- See also 'Geometry.TwoD.Combinators.hsep' and
-- 'Geometry.TwoD.Combinators.vsep'
sep
:: (InSpace v n t, Monoid t, Enveloped t, HasOrigin t)
=> v n -> n -> [t] -> t
sep _ _ [] = mempty
sep (signorm -> v) s (t0:ts) = snd $ foldl' f (n0, t0) ts
where
-- If we come across an empty envelope treat it as a point on the
-- origin (this isn't ideal but what else can we do? Maybe don't
-- even move it at all?)
extent' = fromMaybe (0,0) . extent v
n0 = snd $ extent' t0
f (!n, tAcc) t = (n + s - nMin + nMax, tAcc')
where
(nMin, nMax) = extent' t
nStart = n + s - nMin
tAcc' = tAcc `mappend` moveOriginTo (P $ negate nStart *^ v) t
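-- A hedged illustration (not part of the original module):
-- @sep unitX 0.5 [a, b, c]@ lays out @a@, @b@ and @c@ along the x-axis with a
-- gap of 0.5 between successive envelopes, keeping the local origin of @a@.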
-- | Evenly separate items along the vector @v@ at distance @s@,
-- starting at the 'origin'.
--
-- >>> sepEven unitX 1 $ map regPoly [3..7]
--
-- See also 'Geometry.TwoD.Combinators.hsepEven' and
-- 'Geometry.TwoD.Combinators.vsepEven'
sepEven
:: (InSpace v n t, Metric v, Floating n, Monoid t, HasOrigin t)
=> v n -> n -> [t] -> t
sepEven (signorm -> v) s =
position . zip (iterate (.+^ s *^ v) origin)
{-# INLINE sepEven #-}
------------------------------------------------------------------------
-- Aligning
------------------------------------------------------------------------
-- | @alignBy v d a@ moves the origin of @a@ along the vector @v@. If @d
-- = 1@, the origin is moved to the edge of the boundary in the
-- direction of @v@; if @d = -1@, it moves to the edge of the boundary
-- in the direction of the negation of @v@. Other values of @d@
-- interpolate linearly (so for example, @d = 0@ centers the origin
-- along the direction of @v@).
alignBy'
:: (InSpace v n t, Fractional n, HasOrigin t)
=> (v n -> t -> Maybe (n, n)) -> v n -> n -> t -> t
alignBy' f v d t = fromMaybe t $ do
(a,b) <- f v t
Just $ moveOriginTo (P $ lerp' ((d + 1) / 2) b a *^ v) t
where
lerp' alpha a b = alpha * a + (1 - alpha) * b
-- case f v of
-- Just (a,b) -> moveOriginTo (lerp ((d + 1) / 2) a b) t
-- Nothing -> t
{-# INLINE alignBy'#-}
alignBy
:: (InSpace v n t, Enveloped t, HasOrigin t)
=> v n -> n -> t -> t
alignBy = alignBy' extent
{-# INLINE alignBy#-}
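-- A hedged illustration (not part of the original module): if the extent of
-- @t@ along @unitX@ is @(-2, 4)@, then @alignBy unitX 1@ moves the origin to
-- 4, @alignBy unitX (-1)@ moves it to -2, and @alignBy unitX 0@ moves it to
-- the midpoint 1.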
-- | @align v@ aligns an enveloped object along the edge in the
-- direction of @v@. That is, it moves the local origin in the
-- direction of @v@ until it is on the edge of the envelope. (Note
-- that if the local origin is outside the envelope to begin with, it
-- may have to move \"backwards\".)
align
:: (InSpace v n t, Enveloped t, HasOrigin t)
=> v n -> t -> t
align v = alignBy v 1
-- | Version of @alignBy@ specialized to use @traceBoundary@
snugBy
:: (InSpace v n t, Fractional n, Traced t, HasOrigin t)
=> v n -> n -> t -> t
snugBy = alignBy' traceBoundary
traceBoundary :: (InSpace v n t, Traced t) => v n -> t -> Maybe (n,n)
traceBoundary = \v t ->
case appTrace (getTrace t) origin v of
x :< xs -> foldl' (\(V2 a b) x' -> V2 (min a x') (max b x')) (V2 x x) xs
& \(V2 a b) -> Just (a,b)
_ -> Nothing
{-# INLINE traceBoundary #-}
-- | Like align but uses trace.
snug :: (InSpace v n t, Fractional n, Traced t, HasOrigin t)
=> v n -> t -> t
snug v = snugBy v 1
-- | @centerV v@ centers an enveloped object along the direction of
-- @v@.
centerV
:: (InSpace v n a, Enveloped a, HasOrigin a)
=> v n -> a -> a
centerV v = alignBy v 0
applyAll :: Foldable t => t (b -> b) -> b -> b
applyAll = foldr (.) id
-- | @center@ centers an enveloped object along all of its basis vectors.
center
:: (InSpace v n a, Traversable v, Enveloped a, HasOrigin a)
=> a -> a
center = applyAll fs
where
fs = map centerV basis
-- | Like @centerV@ using trace.
snugCenterV
:: (InSpace v n a, Fractional n, Traced a, HasOrigin a)
=> v n -> a -> a
snugCenterV v = snugBy v 0
-- | Like @center@ using trace.
snugCenter
:: (InSpace v n a, Traversable v, Fractional n, HasOrigin a, Traced a)
=> a -> a
snugCenter = applyAll fs
where
fs = map snugCenterV basis
|
cchalmers/geometry
|
src/Geometry/Combinators.hs
|
Haskell
|
bsd-3-clause
| 10,535
|
import Test.Hspec
import EratosthenesSieve
main :: IO ()
main = hspec $ do
describe "The sieve of Eratosthenes" $ do
it "should return a list with all the prime numbers up to a given number" $ do
primesUpTo 2 `shouldBe` [2]
primesUpTo 3 `shouldBe` [2, 3]
primesUpTo 5 `shouldBe` [2, 3, 5]
primesUpTo 7 `shouldBe` [2, 3, 5, 7]
primesUpTo 11 `shouldBe` [2, 3, 5, 7, 11]
|
theUniC/eratosthenes-sieve.hs
|
test/Spec.hs
|
Haskell
|
bsd-3-clause
| 405
|
module Pipe
( TSink
, TSource
, TPipe
, newTPipe
, writeTSink
, readTSource
) where
import Control.Concurrent.STM
newtype TSink a = TSink (TChan a)
newtype TSource a = TSource (TChan a)
type TPipe a = (TSink a, TSource a)
writeTSink :: TSink a -> a -> STM ()
writeTSink (TSink chan) = writeTChan chan
readTSource :: TSource a -> STM a
readTSource (TSource chan) = readTChan chan
newTPipe :: STM (TPipe a)
newTPipe = do
chan <- newTChan
return (TSink chan, TSource chan)
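-- A minimal usage sketch (the helper name 'example' is not part of the
-- original module): create a pipe, push a value into the sink and read it
-- back from the source.
example :: IO Int
example = do
  (sink, source) <- atomically newTPipe
  atomically (writeTSink sink 42)
  atomically (readTSource source)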
|
frerich/lambdacrawler
|
src/Pipe.hs
|
Haskell
|
bsd-3-clause
| 512
|
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
module Duckling.Duration.HU.Tests
( tests
) where
import Data.String
import Prelude
import Test.Tasty
import Duckling.Dimensions.Types
import Duckling.Duration.HU.Corpus
import Duckling.Testing.Asserts
tests :: TestTree
tests = testGroup "HU Tests"
[ makeCorpusTest [Seal Duration] corpus
]
|
facebookincubator/duckling
|
tests/Duckling/Duration/HU/Tests.hs
|
Haskell
|
bsd-3-clause
| 509
|
-- |
-- Module : Language.SequentCore.Plugin
-- Description : GHC plugin library
-- Maintainer : maurerl@cs.uoregon.edu
-- Stability : experimental
--
-- Tools for writing a GHC plugin using the Sequent Core language in place of
-- GHC Core.
module Language.SequentCore.Plugin (
sequentPass, sequentPassWithFlags
) where
import Language.SequentCore.Driver.Flags
import Language.SequentCore.Syntax
import Language.SequentCore.Translate
import GhcPlugins ( ModGuts, CoreM
, bindsOnlyPass
, deShadowBinds
)
-- | Given a function that processes a module's bindings as Sequent Core terms,
-- perform the same processing as a Core-to-Core pass usable from a GHC plugin.
-- Intended to be passed to the @CoreDoPluginPass@ constructor as part of your
-- plugin's @installCoreToDos@ function. See "Language.SequentCore.Dump" for an
-- example and the GHC manual for more details.
sequentPass :: ([SeqCoreBind] -> CoreM [SeqCoreBind])
-- ^ A processing function. May assume that there are no shadowed
-- identifiers in the given binders (this is ensured by a call to
-- 'deShadowBinds').
-> (ModGuts -> CoreM ModGuts)
sequentPass process =
bindsOnlyPass (fmap bindsToCore . process . fromCoreModule . deShadowBinds)
-- | Similar to 'sequentPass', but takes a 'SeqFlags' for use by the
-- translation.
sequentPassWithFlags :: SeqFlags
-> ([SeqCoreBind] -> CoreM [SeqCoreBind])
-> (ModGuts -> CoreM ModGuts)
sequentPassWithFlags sflags process =
bindsOnlyPass $ \binds -> do
term <- fromCoreModuleM sflags (deShadowBinds binds)
term' <- process term
return $ bindsToCore term'
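-- A hypothetical plugin skeleton (not part of this module) showing where
-- 'sequentPass' slots into 'installCoreToDos'; the pass name and
-- 'myTransform' are assumptions.
--
-- > plugin :: Plugin
-- > plugin = defaultPlugin
-- >   { installCoreToDos = \_ todos ->
-- >       return (CoreDoPluginPass "my-sequent-pass" (sequentPass myTransform) : todos) }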
|
lukemaurer/sequent-core
|
src/Language/SequentCore/Plugin.hs
|
Haskell
|
bsd-3-clause
| 1,741
|
{-# LANGUAGE BangPatterns, DeriveDataTypeable, DeriveGeneric, FlexibleInstances, MultiParamTypeClasses #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
module Web.RTBBidder.Protocol.Adx.BidRequest.AdSlot.NativeAdTemplate.Fields (Fields(..)) where
import Prelude ((+), (/), (.))
import qualified Prelude as Prelude'
import qualified Data.Typeable as Prelude'
import qualified GHC.Generics as Prelude'
import qualified Data.Data as Prelude'
import qualified Text.ProtocolBuffers.Header as P'
data Fields = NO_FIELDS
| HEADLINE
| BODY
| CALL_TO_ACTION
| ADVERTISER
| IMAGE
| LOGO
| APP_ICON
| STAR_RATING
| PRICE
| STORE
| VIDEO
deriving (Prelude'.Read, Prelude'.Show, Prelude'.Eq, Prelude'.Ord, Prelude'.Typeable, Prelude'.Data, Prelude'.Generic)
instance P'.Mergeable Fields
instance Prelude'.Bounded Fields where
minBound = NO_FIELDS
maxBound = VIDEO
instance P'.Default Fields where
defaultValue = NO_FIELDS
toMaybe'Enum :: Prelude'.Int -> P'.Maybe Fields
toMaybe'Enum 0 = Prelude'.Just NO_FIELDS
toMaybe'Enum 1 = Prelude'.Just HEADLINE
toMaybe'Enum 2 = Prelude'.Just BODY
toMaybe'Enum 4 = Prelude'.Just CALL_TO_ACTION
toMaybe'Enum 8 = Prelude'.Just ADVERTISER
toMaybe'Enum 16 = Prelude'.Just IMAGE
toMaybe'Enum 32 = Prelude'.Just LOGO
toMaybe'Enum 64 = Prelude'.Just APP_ICON
toMaybe'Enum 128 = Prelude'.Just STAR_RATING
toMaybe'Enum 256 = Prelude'.Just PRICE
toMaybe'Enum 512 = Prelude'.Just STORE
toMaybe'Enum 1024 = Prelude'.Just VIDEO
toMaybe'Enum _ = Prelude'.Nothing
instance Prelude'.Enum Fields where
fromEnum NO_FIELDS = 0
fromEnum HEADLINE = 1
fromEnum BODY = 2
fromEnum CALL_TO_ACTION = 4
fromEnum ADVERTISER = 8
fromEnum IMAGE = 16
fromEnum LOGO = 32
fromEnum APP_ICON = 64
fromEnum STAR_RATING = 128
fromEnum PRICE = 256
fromEnum STORE = 512
fromEnum VIDEO = 1024
toEnum
= P'.fromMaybe
(Prelude'.error
"hprotoc generated code: toEnum failure for type Web.RTBBidder.Protocol.Adx.BidRequest.AdSlot.NativeAdTemplate.Fields")
. toMaybe'Enum
succ NO_FIELDS = HEADLINE
succ HEADLINE = BODY
succ BODY = CALL_TO_ACTION
succ CALL_TO_ACTION = ADVERTISER
succ ADVERTISER = IMAGE
succ IMAGE = LOGO
succ LOGO = APP_ICON
succ APP_ICON = STAR_RATING
succ STAR_RATING = PRICE
succ PRICE = STORE
succ STORE = VIDEO
succ _
= Prelude'.error
"hprotoc generated code: succ failure for type Web.RTBBidder.Protocol.Adx.BidRequest.AdSlot.NativeAdTemplate.Fields"
pred HEADLINE = NO_FIELDS
pred BODY = HEADLINE
pred CALL_TO_ACTION = BODY
pred ADVERTISER = CALL_TO_ACTION
pred IMAGE = ADVERTISER
pred LOGO = IMAGE
pred APP_ICON = LOGO
pred STAR_RATING = APP_ICON
pred PRICE = STAR_RATING
pred STORE = PRICE
pred VIDEO = STORE
pred _
= Prelude'.error
"hprotoc generated code: pred failure for type Web.RTBBidder.Protocol.Adx.BidRequest.AdSlot.NativeAdTemplate.Fields"
instance P'.Wire Fields where
wireSize ft' enum = P'.wireSize ft' (Prelude'.fromEnum enum)
wirePut ft' enum = P'.wirePut ft' (Prelude'.fromEnum enum)
wireGet 14 = P'.wireGetEnum toMaybe'Enum
wireGet ft' = P'.wireGetErr ft'
wireGetPacked 14 = P'.wireGetPackedEnum toMaybe'Enum
wireGetPacked ft' = P'.wireGetErr ft'
instance P'.GPB Fields
instance P'.MessageAPI msg' (msg' -> Fields) Fields where
getVal m' f' = f' m'
instance P'.ReflectEnum Fields where
reflectEnum
= [(0, "NO_FIELDS", NO_FIELDS), (1, "HEADLINE", HEADLINE), (2, "BODY", BODY), (4, "CALL_TO_ACTION", CALL_TO_ACTION),
(8, "ADVERTISER", ADVERTISER), (16, "IMAGE", IMAGE), (32, "LOGO", LOGO), (64, "APP_ICON", APP_ICON),
(128, "STAR_RATING", STAR_RATING), (256, "PRICE", PRICE), (512, "STORE", STORE), (1024, "VIDEO", VIDEO)]
reflectEnumInfo _
= P'.EnumInfo
(P'.makePNF (P'.pack ".Adx.BidRequest.AdSlot.NativeAdTemplate.Fields") ["Web", "RTBBidder", "Protocol"]
["Adx", "BidRequest", "AdSlot", "NativeAdTemplate"]
"Fields")
["Web", "RTBBidder", "Protocol", "Adx", "BidRequest", "AdSlot", "NativeAdTemplate", "Fields.hs"]
[(0, "NO_FIELDS"), (1, "HEADLINE"), (2, "BODY"), (4, "CALL_TO_ACTION"), (8, "ADVERTISER"), (16, "IMAGE"), (32, "LOGO"),
(64, "APP_ICON"), (128, "STAR_RATING"), (256, "PRICE"), (512, "STORE"), (1024, "VIDEO")]
instance P'.TextType Fields where
tellT = P'.tellShow
getT = P'.getRead
|
hiratara/hs-rtb-bidder
|
src/Web/RTBBidder/Protocol/Adx/BidRequest/AdSlot/NativeAdTemplate/Fields.hs
|
Haskell
|
bsd-3-clause
| 4,477
|
-- |
-- Helper methods used to construct requests.
--
module Network.TableStorage.Request (
propertyList,
entityKeyResource,
columnToTypeString,
printEntityColumn,
printComparisonType,
buildFilterString,
buildQueryString
) where
import Data.Time ( formatTime )
import System.Locale ( defaultTimeLocale )
import Data.Maybe ( fromMaybe )
import Data.List ( intercalate )
import Text.XML.Light.Types ( elAttribs )
import Text.XML.Light
( Element(elContent, elName),
Content(Elem),
Attr(Attr),
blank_element )
import Network.TableStorage.Types
( EntityFilter(..),
ComparisonType(..),
EntityQuery(eqFilter, eqPageSize),
EntityColumn(..),
EntityKey(ekPartitionKey, ekRowKey) )
import Network.TableStorage.XML ( cDataText )
import Network.TableStorage.Atom
( qualifyDataServices, qualifyMetadata )
import Network.TableStorage.Format ( atomDateFormat )
import Network.HTTP.Base ( urlEncode )
-- |
-- Formats a list of entity properties for inclusion in an Atom entry.
--
propertyList :: [(String, EntityColumn)] -> Element
propertyList props =
blank_element { elName = qualifyMetadata "properties",
elContent = map property props } where
property (key, value) =
let stringValue = printEntityColumn value in
Elem blank_element { elName = qualifyDataServices key,
elAttribs = [ Attr (qualifyMetadata "type") $ columnToTypeString value,
Attr (qualifyMetadata "null") $ maybe "true" (const "false") stringValue ],
elContent = cDataText $ fromMaybe "" stringValue }
-- |
-- Constructs relative URIs which refer to the entity with the specified table name
-- and entity key.
--
entityKeyResource :: String -> EntityKey -> String
entityKeyResource tableName key = "/" ++ tableName ++ "(PartitionKey='" ++ ekPartitionKey key ++ "',RowKey='" ++ ekRowKey key ++ "')"
-- |
-- Converts an entity column into its type name
--
columnToTypeString :: EntityColumn -> String
columnToTypeString (EdmBinary _) = "Edm.Binary"
columnToTypeString (EdmBoolean _) = "Edm.Boolean"
columnToTypeString (EdmDateTime _) = "Edm.DateTime"
columnToTypeString (EdmDouble _) = "Edm.Double"
columnToTypeString (EdmGuid _)    = "Edm.Guid"
columnToTypeString (EdmInt32 _) = "Edm.Int32"
columnToTypeString (EdmInt64 _) = "Edm.Int64"
columnToTypeString (EdmString _) = "Edm.String"
-- |
-- Formats a column value to appear in the body of an Atom entry
--
printEntityColumn :: EntityColumn -> Maybe String
printEntityColumn (EdmBinary (Just val)) = Just val
printEntityColumn (EdmBoolean (Just True)) = Just "true"
printEntityColumn (EdmBoolean (Just False)) = Just "false"
printEntityColumn (EdmDateTime (Just val)) = Just $ formatTime defaultTimeLocale atomDateFormat val
printEntityColumn (EdmDouble (Just val)) = Just $ show val
printEntityColumn (EdmGuid (Just val)) = Just val
printEntityColumn (EdmInt32 (Just val)) = Just $ show val
printEntityColumn (EdmInt64 (Just val)) = Just $ show val
printEntityColumn (EdmString (Just val)) = Just val
printEntityColumn _ = Nothing
-- |
-- Formats a comparison type to appear in the query string
--
printComparisonType :: ComparisonType -> String
printComparisonType Equal = "eq"
printComparisonType GreaterThan = "gt"
printComparisonType GreaterThanOrEqual = "ge"
printComparisonType LessThan = "lt"
printComparisonType LessThanOrEqual = "le"
printComparisonType NotEqual = "ne"
-- |
-- Converts entity filter values into strings to appear in the filter
-- portion of the Query Entities URI.
--
buildFilterString :: EntityFilter -> String
buildFilterString (And fs) = '(' : intercalate "%20and%20" (map buildFilterString fs) ++ ")"
buildFilterString (Or fs) = '(' : intercalate "%20or%20" (map buildFilterString fs) ++ ")"
buildFilterString (Not f) =
"(not%20"
++ buildFilterString f
++ ")"
buildFilterString (CompareBoolean prop val) =
urlEncode prop
++ "%20eq%20"
++ if val then "true" else "false"
buildFilterString (CompareDateTime prop cmp val) =
urlEncode prop
++ "%20"
++ printComparisonType cmp
++ "%20datetime'"
++ formatTime defaultTimeLocale atomDateFormat val
++ "'"
buildFilterString (CompareDouble prop cmp val) =
urlEncode prop
++ "%20"
++ printComparisonType cmp
++ "%20"
++ show val
buildFilterString (CompareGuid prop val) =
urlEncode prop
++ "%20eq%20guid'"
++ val
++ "'"
buildFilterString (CompareInt32 prop cmp val) =
urlEncode prop
++ "%20"
++ printComparisonType cmp
++ "%20"
++ show val
buildFilterString (CompareInt64 prop cmp val) =
urlEncode prop
++ "%20"
++ printComparisonType cmp
++ "%20"
++ show val
buildFilterString (CompareString prop cmp val) =
urlEncode prop
++ "%20"
++ printComparisonType cmp
++ "%20'"
++ urlEncode val
++ "'"
-- |
-- Constructs the full query string for the Query Entities web method.
--
buildQueryString :: EntityQuery -> String
buildQueryString query =
"$filter="
++ maybe "" buildFilterString (eqFilter query)
++ "&$top="
++ maybe "" show (eqPageSize query)
|
paf31/tablestorage
|
src/Network/TableStorage/Request.hs
|
Haskell
|
bsd-3-clause
| 5,280
|
{-# LANGUAGE PolyKinds #-}
module Data.Flip
( Flip (..)
) where
newtype Flip f a b = Flip { getFlip :: f b a }
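-- A tiny illustration (the name 'flipped' is not part of the module): 'Flip'
-- swaps the last two type arguments, so an @Either String Int@ value inhabits
-- @Flip Either Int String@.
flipped :: Flip Either Int String
flipped = Flip (Right 3)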
|
sonyandy/unify
|
examples/unify-hm/Data/Flip.hs
|
Haskell
|
bsd-3-clause
| 126
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE TypeSynonymInstances #-}
module TransactionServer where
import System.Random
import Control.Monad.Trans.Except
import Control.Monad.Trans.Resource
import Control.Monad.IO.Class
import Data.Aeson
import Data.Aeson.TH
import Data.Bson.Generic
import GHC.Generics
import Network.Wai hiding(Response)
import Network.Wai.Handler.Warp
import Network.Wai.Logger
import Servant
import Servant.API
import Servant.Client
import System.IO
import System.Directory
import System.Environment (getArgs, getProgName, lookupEnv)
import System.Log.Formatter
import System.Log.Handler (setFormatter)
import System.Log.Handler.Simple
import System.Log.Handler.Syslog
import System.Log.Logger
import Data.Bson.Generic
import qualified Data.List as DL
import Data.Maybe (catMaybes)
import Data.Text (pack, unpack)
import Data.Time.Clock (UTCTime, getCurrentTime)
import Data.Time.Format (defaultTimeLocale, formatTime)
import Database.MongoDB
import Control.Monad (when)
import Network.HTTP.Client (newManager, defaultManagerSettings)
import CommonResources
import MongodbHelpers
type ApiHandler = ExceptT ServantErr IO
transactionApi :: Proxy TransactionApi
transactionApi = Proxy
server :: Server TransactionApi
server =
beginTrans :<|>
downloadTrans :<|>
uploadTrans :<|>
commitTrans
transactionApp :: Application
transactionApp = serve transactionApi server
directoryApi :: Proxy DirectoryApi
directoryApi = Proxy
join :: FileServer -> ClientM Response
open :: FileName -> ClientM File
close :: FileUpload -> ClientM Response
allfiles :: Ticket -> ClientM [String]
remove :: FileName -> ClientM Response
join :<|> open :<|> close :<|> allfiles :<|> remove = client directoryApi
runApp :: IO()
runApp = do
putStrLn ("Starting TransactionServer on port: " ++ transserverport)
run (read (transserverport) ::Int) transactionApp
beginTrans :: Ticket -> ApiHandler Response
beginTrans (Ticket ticket encryptedTimeout) = liftIO $ do
let sessionKey = encryptDecrypt sharedSecret ticket
let decryptedTimeout = decryptTime sharedSecret encryptedTimeout
putStrLn ("Checking Client Credentials...")
currentTime <- getCurrentTime
if (currentTime > decryptedTimeout) then do
putStrLn "Client session timeout"
return (Response (encryptDecrypt sessionKey "Failed"))
else do
putStrLn "Starting transaction"
putStrLn "Storing client sessionKey as transaction ID"
withMongoDbConnection $ upsert (select ["transactionID" =: sessionKey] "TRANSACTION_ID_RECORD") $ toBSON sessionKey
return (Response (encryptDecrypt sessionKey "Successful"))
downloadTrans :: FileName -> ApiHandler File
downloadTrans fileName@(FileName ticket encryptedTimeout encryptedFN) = liftIO $ do
let sessionKey = encryptDecrypt sharedSecret ticket
let decryptedTimeout = decryptTime sharedSecret encryptedTimeout
let decryptedFN = encryptDecrypt sessionKey encryptedFN
putStrLn ("Checking Client Credentials...")
currentTime <- getCurrentTime
if (currentTime > decryptedTimeout) then do
putStrLn "Client session timeout"
return (File "Failed" "Failed")
else do
manager <- newManager defaultManagerSettings
res <- runClientM (open fileName) (ClientEnv manager (BaseUrl Http dirserverhost (read (dirserverport) :: Int) ""))
case res of
Left err -> do
putStrLn (show err)
return (File "Failed" "Failed")
Right file -> do
putStrLn "Storing file transaction data"
withMongoDbConnection $ upsert (select ["userID" =: sessionKey, "transFileName" =: decryptedFN] "TRANSACTION_FILE_RECORD") $ toBSON (TransactionFile decryptedFN sessionKey)
return file
uploadTrans :: FileUpload -> ApiHandler Response
uploadTrans fileUpload@(FileUpload ticket encryptedTimeout (File encryptedFN encryptedFC)) = liftIO $ do
let sessionKey = encryptDecrypt sharedSecret ticket
let decryptedTimeout = decryptTime sharedSecret encryptedTimeout
let decryptedFN = encryptDecrypt sessionKey encryptedFN
putStrLn ("Checking Client Credentials...")
currentTime <- getCurrentTime
if (currentTime > decryptedTimeout) then do
putStrLn "Client session timeout"
return (Response (encryptDecrypt sessionKey "Failed"))
else do
manager <- newManager defaultManagerSettings
let tempFileName = encryptDecrypt sessionKey ("TMP~"++decryptedFN)
let fupload = FileUpload ticket encryptedTimeout (File tempFileName encryptedFC)
res <- runClientM (TransactionServer.close fupload) (ClientEnv manager (BaseUrl Http dirserverhost (read (dirserverport) :: Int) ""))
case res of
Left err -> do
putStrLn (show err)
return (Response (encryptDecrypt sessionKey "Failed"))
Right (Response response) -> do
let decryptedres = encryptDecrypt sessionKey response
putStrLn ("Uploaded temp file - " ++ decryptedres)
return (Response response)
commitTrans :: Ticket -> ApiHandler Response
commitTrans tic@(Ticket ticket encryptedTimeout) = liftIO $ do
let sessionKey = encryptDecrypt sharedSecret ticket
let decryptedTimeout = decryptTime sharedSecret encryptedTimeout
putStrLn ("Checking Client Credentials...")
currentTime <- getCurrentTime
if (currentTime > decryptedTimeout) then do
putStrLn "Client session timeout"
return (Response (encryptDecrypt sessionKey "Failed"))
else do
transactions <- liftIO $ withMongoDbConnection $ do
docs <- find (select ["userID" =: sessionKey] "TRANSACTION_FILE_RECORD") >>= drainCursor
return $ catMaybes $ DL.map (\ b -> fromBSON b :: Maybe TransactionFile) docs
mapM (commitfile tic) transactions
liftIO $ withMongoDbConnection $ do
delete (select ["userID" =: sessionKey] "TRANSACTION_FILE_RECORD")
liftIO $ withMongoDbConnection $ do
delete (select ["transactionID" =: sessionKey] "TRANSACTION_ID_RECORD")
return (Response (encryptDecrypt sessionKey "Successful"))
commitfile :: Ticket -> TransactionFile -> IO()
commitfile (Ticket ticket encryptedTimeout) (TransactionFile decryptedFN sessionKey) = liftIO $ do
putStrLn ("Commiting file: " ++ decryptedFN)
manager <- newManager defaultManagerSettings
let temp_file = encryptDecrypt sessionKey ("TMP~"++ decryptedFN)
let fileName = (FileName ticket encryptedTimeout temp_file)
res <- runClientM (open fileName) (ClientEnv manager (BaseUrl Http dirserverhost (read (dirserverport) :: Int) ""))
case res of
Left err -> putStrLn (show err)
Right (File encryptedFN encryptedFC) -> do
let fn = encryptDecrypt sessionKey encryptedFN
let temp = encryptDecrypt sessionKey temp_file
case (temp == fn) of
False -> putStrLn "Commit Failed"
True -> do
let fileupload = (FileUpload ticket encryptedTimeout (File (encryptDecrypt sessionKey decryptedFN) encryptedFC))
res <- runClientM (TransactionServer.close fileupload) (ClientEnv manager (BaseUrl Http dirserverhost (read (dirserverport) :: Int) ""))
case res of
Left err -> do putStrLn (show err)
Right (Response response) -> do
let uploadresponse = encryptDecrypt sessionKey response
putStrLn uploadresponse
case uploadresponse of
"Success" -> do
res <- runClientM (remove (FileName ticket encryptedTimeout temp_file)) (ClientEnv manager (BaseUrl Http dirserverhost (read (dirserverport) :: Int) ""))
case res of
Left err -> putStrLn (show err)
Right (Response response) -> putStrLn (encryptDecrypt sessionKey response)
_ -> putStrLn "Shouldnt get here"
|
Garygunn94/DFS
|
TransactionServer/src/TransactionServer.hs
|
Haskell
|
bsd-3-clause
| 8,739
|
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
module Duckling.Temperature.IT.Tests
( tests ) where
import Data.String
import Prelude
import Test.Tasty
import Duckling.Dimensions.Types
import Duckling.Temperature.IT.Corpus
import Duckling.Testing.Asserts
tests :: TestTree
tests = testGroup "IT Tests"
[ makeCorpusTest [Seal Temperature] corpus
]
|
facebookincubator/duckling
|
tests/Duckling/Temperature/IT/Tests.hs
|
Haskell
|
bsd-3-clause
| 515
|
{-# LANGUAGE ScopedTypeVariables #-}
module Rad.QL.Define.Schema where
import Data.Monoid ((<>))
import qualified Data.Trie as Trie
import Rad.QL.Internal.Types
import Rad.QL.AST
import Rad.QL.Types
import Rad.QL.Query
defineSchema :: forall m b. (GraphQLType OBJECT m b) => b -> Schema m
defineSchema root = Schema
{ typeDict = collectTypes Trie.empty [tdef]
, rootQuery = res
, rootQueryType = tdef
}
where rdef = def :: GraphQLTypeDef OBJECT m b
tdef = gqlTypeDef rdef
res ss = unpackSub $ (gqlResolve rdef) ss root
unpackSub (SubResult m) = return m
unpackSub (SubResultM m) = m
collectTypes :: TypeDict -> [TypeDef] -> TypeDict
collectTypes seen [] = seen
collectTypes seen (t:ts)
| Trie.member n seen = collectTypes seen ts
| otherwise = collectTypes (Trie.insert n t seen) ts'
where n = typeDefName t
ts' = case t of
TypeDefObject (ObjectTypeDef _ _ ifs fdefs) ->
ts <> [ TypeDefInterface ifdef | ifdef <- ifs ]
<> [ t' | FieldDef _ _ _ _ t' _ <- fdefs ]
<> [ t' | FieldDef _ _ argdefs _ _ _ <- fdefs
, InputValueDef _ _ _ t' _ <- argdefs
]
TypeDefInterface (InterfaceTypeDef _ _ fdefs) ->
ts <> [ t' | FieldDef _ _ _ _ t' _ <- fdefs ]
<> [ t' | FieldDef _ _ argdefs _ _ _ <- fdefs
, InputValueDef _ _ _ t' _ <- argdefs
]
TypeDefUnion (UnionTypeDef _ _ odefs) ->
ts <> [ TypeDefObject o | o <- odefs ]
_ -> ts
|
jqyu/bustle-chi
|
src/Rad/QL/Define/Schema.hs
|
Haskell
|
bsd-3-clause
| 1,685
|
{-# LANGUAGE OverloadedStrings #-}
module Milter.Base (
Packet (..)
, getPacket
, getIP
, getKeyVal
, getBody
, negotiate
, accept, discard, hold, reject, continue
) where
import Blaze.ByteString.Builder
import Blaze.ByteString.Builder.Char8
import Control.Applicative
import Control.Monad
import qualified Data.ByteString as X (unpack)
import Data.ByteString.Char8 (ByteString)
import qualified Data.ByteString.Char8 as BS
import Data.IP
import Data.List (foldl')
import Data.Monoid
import System.IO
----------------------------------------------------------------
accept :: Handle -> IO ()
accept hdl = safePutPacket hdl $ Packet 'a' ""
discard :: Handle -> IO ()
discard hdl = safePutPacket hdl $ Packet 'd' ""
hold :: Handle -> IO ()
hold hdl = safePutPacket hdl $ Packet 't' ""
reject :: Handle -> IO ()
reject hdl = safePutPacket hdl $ Packet 'r' ""
continue :: Handle -> IO ()
continue hdl = safePutPacket hdl $ Packet 'c' ""
----------------------------------------------------------------
data Packet = Packet Char ByteString
getPacket :: Handle -> IO Packet
getPacket hdl = do
n <- fourBytesToInt <$> getNByte hdl 4
Packet <$> getCmd hdl <*> getNByte hdl (n - 1)
putPacket :: Handle -> Packet -> IO ()
putPacket hdl (Packet c bs) = do
let len = BS.length bs + 1
pkt = intToFourBytes len <> fromChar c <> fromByteString bs
BS.hPut hdl $ toByteString pkt
safePutPacket :: Handle -> Packet -> IO ()
safePutPacket hdl pkt = withOpenedHandleDo hdl $ putPacket hdl pkt
withOpenedHandleDo :: Handle -> IO () -> IO ()
withOpenedHandleDo hdl block = do
closed <- hIsClosed hdl
unless closed block
----------------------------------------------------------------
getKeyVal :: ByteString -> (ByteString, ByteString)
getKeyVal bs = (key,val)
where
kv = BS.split '\0' bs
key = kv !! 0
val = kv !! 1
----------------------------------------------------------------
getBody :: ByteString -> ByteString
getBody = BS.init -- removing the last '\0'
----------------------------------------------------------------
getIP :: ByteString -> IP
getIP bs
| fam == '4' = IPv4 . read $ adr
| otherwise = IPv6 . read $ adr
where
ip = BS.split '\0' bs !! 1
fam = BS.head ip
adr = BS.unpack $ BS.drop 3 ip
----------------------------------------------------------------
negotiate :: Handle -> IO ()
negotiate hdl = putPacket hdl negoPkt -- do NOT use safePutPacket
negoPkt :: Packet
negoPkt = Packet 'O' $ toByteString $ ver <> act <> pro
where
ver = intToFourBytes 2 -- Sendmail 8.13.8, sigh
act = intToFourBytes 0
pro = intToFourBytes noRcpt
noRcpt :: Int
noRcpt = 0x8
{- version 2 does not support, sigh
noUnknown = 0x100
noData = 0x200
-}
----------------------------------------------------------------
getNByte :: Handle -> Int -> IO ByteString
getNByte = BS.hGet
getCmd :: Handle -> IO Char
getCmd hdl = BS.head <$> BS.hGet hdl 1
fourBytesToInt :: ByteString -> Int
fourBytesToInt = foldl' (\a b -> a * 256 + b) 0 . map fromIntegral . X.unpack
intToFourBytes :: Int -> Builder
intToFourBytes = fromInt32be . fromIntegral
{-
moddiv :: Int -> [Int]
moddiv q0 = [r4,r3,r2,r1]
where
(q1,r1) = q0 `divMod` 256
(q2,r2) = q1 `divMod` 256
(q3,r3) = q2 `divMod` 256
r4 = q3 `mod` 256
-}
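-- A rough handler-loop sketch (the name 'exampleLoop' is illustrative only):
-- 'negotiate' is sent once, then packets are read and each one is answered
-- with 'continue'. A real filter would dispatch on the command character and
-- pick 'accept', 'reject', 'discard' or 'hold' as appropriate.
exampleLoop :: Handle -> IO ()
exampleLoop hdl = do
  negotiate hdl
  forever $ do
    Packet _cmd _body <- getPacket hdl
    continue hdl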
|
kazu-yamamoto/rpf
|
Milter/Base.hs
|
Haskell
|
bsd-3-clause
| 3,325
|
{-
Gifcurry
(C) 2017 David Lettier
lettier.com
-}
{-# LANGUAGE
NamedFieldPuns
, DuplicateRecordFields
#-}
module GuiKeyboard where
import Control.Monad
import Data.IORef
import Data.Word
import qualified GI.Gdk
import qualified GI.Gtk
import qualified GuiRecords as GR
import qualified GuiPreview
import GuiMisc
addKeyboardEventHandler
:: GR.GuiComponents
-> IO ()
addKeyboardEventHandler
guiComponents@GR.GuiComponents
{ GR.window
}
=
void
$ GI.Gtk.onWidgetKeyPressEvent window
$ keyboardEventHandler guiComponents
keyboardEventHandler
:: GR.GuiComponents
-> GI.Gdk.EventKey
-> IO Bool
keyboardEventHandler
guiComponents@GR.GuiComponents
{
}
eventKey
= do
keyValue <- GI.Gdk.getEventKeyKeyval eventKey
let isSeekLeft = isSeekLeftKey keyValue
let isSeekRight = isSeekRightKey keyValue
let isSeek = isSeekLeft || isSeekRight
when isSeek $ handleSeekKeys guiComponents isSeekLeft
return False
handleSeekKeys
:: GR.GuiComponents
-> Bool
-> IO ()
handleSeekKeys
guiComponents@GR.GuiComponents
{ GR.startTimeSpinButton
, GR.endTimeSpinButton
, GR.videoPreviewPauseToggleButton
, GR.guiInFilePropertiesRef
, GR.maybeVideoPreviewWidget = (Just _)
, GR.maybePlaybinElement = (Just _)
}
isSeekLeft
= do
(maybePlaybinDuration, maybePlaybinPosition)
<- GuiPreview.getPlaybinDurationAndPosition guiComponents
case (maybePlaybinDuration, maybePlaybinPosition) of
(Just _, Just playbinPosition) -> do
void
$ GI.Gtk.setToggleButtonActive videoPreviewPauseToggleButton True
startTime <-
secondsToNanoseconds
<$> GI.Gtk.spinButtonGetValue startTimeSpinButton
endTime <-
secondsToNanoseconds
<$> GI.Gtk.spinButtonGetValue endTimeSpinButton
GR.GuiInFileProperties
{ GR.inFileFps
} <- readIORef guiInFilePropertiesRef
let fps = if inFileFps <= 0 then 1 else inFileFps
let inc = doubleToInt64 $ (1 / fps) * nanosecondsInASecond
let seekTo = if isSeekLeft then playbinPosition - inc else playbinPosition + inc
let seekTo'
| seekTo >= endTime = endTime
| seekTo <= startTime = startTime
| otherwise = seekTo
GuiPreview.seekPlaybinElement
guiComponents
(Just seekTo')
(Just endTime)
_ -> return ()
handleSeekKeys _ _ = return ()
isSeekLeftKey
:: Word32
-> Bool
isSeekLeftKey GI.Gdk.KEY_less = True
isSeekLeftKey _ = False
isSeekRightKey
:: Word32
-> Bool
isSeekRightKey GI.Gdk.KEY_greater = True
isSeekRightKey _ = False
|
lettier/gifcurry
|
src/gui/GuiKeyboard.hs
|
Haskell
|
bsd-3-clause
| 2,707
|
{-
This file is part of the Haskell package distinfo. It is subject to
the license terms in the LICENSE file found in the top-level directory
of this distribution and at git://pmade.com/distinfo/LICENSE. No part
of the distinfo package, including this file, may be copied, modified,
propagated, or distributed except according to the terms contained in
the LICENSE file.
-}
--------------------------------------------------------------------------------
module DistInfo (module Export) where
--------------------------------------------------------------------------------
import DistInfo.Server as Export
import DistInfo.Node as Export
|
devalot/distinfo
|
src/DistInfo.hs
|
Haskell
|
bsd-3-clause
| 644
|
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{- |
Core types for content.
The whole site is a list of categories ('Category'). Categories have items
('Item') in them. Items have some sections (fields inside of 'Item'), as well
as traits ('Trait').
It is recommended to use lenses to access fields of various types. All those
lenses are exported from this module. Some lenses (like 'uid' and 'name') are
overloaded and can be used with many types.
-}
module Guide.Types.Core
(
Trait(..),
ItemKind(..),
hackageName,
ItemSection(..),
Item(..),
pros,
prosDeleted,
cons,
consDeleted,
ecosystem,
link,
kind,
Hue(..),
hueToDarkColor,
hueToLightColor,
CategoryStatus(..),
Category(..),
title,
status,
enabledSections,
groups,
items,
itemsDeleted,
categorySlug,
-- * Overloaded things
uid,
hasUid,
content,
name,
description,
notes,
created,
group_,
)
where
import Imports
-- Text
import qualified Data.Text.All as T
-- Containers
import qualified Data.Set as S
-- JSON
import qualified Data.Aeson as A
-- acid-state
import Data.SafeCopy hiding (kind)
import Data.SafeCopy.Migrate
import Guide.Markdown
import Guide.Utils
import Guide.Types.Hue
----------------------------------------------------------------------------
-- General notes on code
----------------------------------------------------------------------------
{-
If you want to add a field to one of the types, see Note [extending types].
For an explanation of deriveSafeCopySorted, see Note [acid-state].
-}
----------------------------------------------------------------------------
-- Trait
----------------------------------------------------------------------------
-- | A trait (pro or con). Traits are stored in items.
data Trait = Trait {
_traitUid :: Uid Trait,
_traitContent :: MarkdownInline }
deriving (Show, Generic, Data)
deriveSafeCopySorted 4 'extension ''Trait
makeFields ''Trait
changelog ''Trait (Current 4, Past 3) []
deriveSafeCopySorted 3 'base ''Trait_v3
instance A.ToJSON Trait where
toJSON = A.genericToJSON A.defaultOptions {
A.fieldLabelModifier = over _head toLower . drop (T.length "_trait") }
----------------------------------------------------------------------------
-- Item
----------------------------------------------------------------------------
-- | Kind of an item (items can be libraries, tools, etc).
data ItemKind
= Library (Maybe Text) -- Hackage name
| Tool (Maybe Text) -- Hackage name
| Other
deriving (Eq, Show, Generic, Data)
deriveSafeCopySimple 3 'extension ''ItemKind
hackageName :: Traversal' ItemKind (Maybe Text)
hackageName f (Library x) = Library <$> f x
hackageName f (Tool x) = Tool <$> f x
hackageName _ Other = pure Other
instance A.ToJSON ItemKind where
toJSON (Library x) = A.object [
"tag" A..= ("Library" :: Text),
"contents" A..= x ]
toJSON (Tool x) = A.object [
"tag" A..= ("Tool" :: Text),
"contents" A..= x ]
toJSON Other = A.object [
"tag" A..= ("Other" :: Text) ]
data ItemKind_v2
= Library_v2 (Maybe Text)
| Tool_v2 (Maybe Text)
| Other_v2
-- TODO: at the next migration change this to deriveSafeCopySimple!
deriveSafeCopy 2 'base ''ItemKind_v2
instance Migrate ItemKind where
type MigrateFrom ItemKind = ItemKind_v2
migrate (Library_v2 x) = Library x
migrate (Tool_v2 x) = Tool x
migrate Other_v2 = Other
-- | Different kinds of sections inside items. This type is only used for
-- '_categoryEnabledSections'.
data ItemSection
= ItemProsConsSection
| ItemEcosystemSection
| ItemNotesSection
deriving (Eq, Ord, Show, Generic, Data)
deriveSafeCopySimple 0 'base ''ItemSection
instance A.ToJSON ItemSection where
toJSON = A.genericToJSON A.defaultOptions
-- TODO: add a field like “people to ask on IRC about this library if you
-- need help”
-- | An item (usually a library). Items are stored in categories.
data Item = Item {
_itemUid :: Uid Item, -- ^ Item ID
_itemName :: Text, -- ^ Item title
_itemCreated :: UTCTime, -- ^ When the item was created
_itemGroup_ :: Maybe Text, -- ^ Item group (affects item's color)
_itemDescription :: MarkdownBlock, -- ^ Item summary
_itemPros :: [Trait], -- ^ Pros (positive traits)
_itemProsDeleted :: [Trait], -- ^ Deleted pros go here (so that
-- it'd be easy to restore them)
_itemCons :: [Trait], -- ^ Cons (negative traits)
_itemConsDeleted :: [Trait], -- ^ Deleted cons go here
_itemEcosystem :: MarkdownBlock, -- ^ The ecosystem section
_itemNotes :: MarkdownTree, -- ^ The notes section
_itemLink :: Maybe Url, -- ^ Link to homepage or something
_itemKind :: ItemKind -- ^ Is it a library, tool, etc
}
deriving (Show, Generic, Data)
deriveSafeCopySorted 11 'extension ''Item
makeFields ''Item
changelog ''Item (Current 11, Past 10) []
deriveSafeCopySorted 10 'base ''Item_v10
instance A.ToJSON Item where
toJSON = A.genericToJSON A.defaultOptions {
A.fieldLabelModifier = over _head toLower . drop (T.length "_item") }
----------------------------------------------------------------------------
-- Category
----------------------------------------------------------------------------
-- | Category status
data CategoryStatus
= CategoryStub -- ^ “Stub” = just created
| CategoryWIP -- ^ “WIP” = work in progress
| CategoryFinished -- ^ “Finished” = complete or nearly complete
deriving (Eq, Show, Generic, Data)
deriveSafeCopySimple 2 'extension ''CategoryStatus
instance A.ToJSON CategoryStatus where
toJSON = A.genericToJSON A.defaultOptions
data CategoryStatus_v1
= CategoryStub_v1
| CategoryWIP_v1
| CategoryMostlyDone_v1
| CategoryFinished_v1
deriveSafeCopySimple 1 'base ''CategoryStatus_v1
instance Migrate CategoryStatus where
type MigrateFrom CategoryStatus = CategoryStatus_v1
migrate CategoryStub_v1 = CategoryStub
migrate CategoryWIP_v1 = CategoryWIP
migrate CategoryMostlyDone_v1 = CategoryFinished
migrate CategoryFinished_v1 = CategoryFinished
-- | A category
data Category = Category {
_categoryUid :: Uid Category,
_categoryTitle :: Text,
-- | When the category was created
_categoryCreated :: UTCTime,
-- | The “grandcategory” of the category (“meta”, “basics”, etc)
_categoryGroup_ :: Text,
_categoryStatus :: CategoryStatus,
_categoryNotes :: MarkdownBlock,
-- | Items stored in the category
_categoryItems :: [Item],
-- | Items that were deleted from the category. We keep them here to make
-- it easier to restore them
_categoryItemsDeleted :: [Item],
  -- | Enabled sections in this category. E.g., if this set contains
-- 'ItemNotesSection', then notes will be shown for each item
_categoryEnabledSections :: Set ItemSection,
-- | All groups of items belonging to the category, as well as their
-- colors. Storing colors explicitly lets us keep colors consistent when
-- all items in a group are deleted
_categoryGroups :: Map Text Hue
}
deriving (Show, Generic, Data)
deriveSafeCopySorted 11 'extension ''Category
makeFields ''Category
changelog ''Category (Current 11, Past 10)
[Removed "_categoryProsConsEnabled" [t|Bool|],
Removed "_categoryEcosystemEnabled" [t|Bool|],
Removed "_categoryNotesEnabled" [t|Bool|],
Added "_categoryEnabledSections" [hs|
S.fromList $ concat
[ [ItemProsConsSection | _categoryProsConsEnabled]
, [ItemEcosystemSection | _categoryEcosystemEnabled]
, [ItemNotesSection | _categoryNotesEnabled] ] |] ]
deriveSafeCopySorted 10 'extension ''Category_v10
changelog ''Category (Past 10, Past 9)
[Added "_categoryNotesEnabled" [hs|True|]]
deriveSafeCopySorted 9 'extension ''Category_v9
changelog ''Category (Past 9, Past 8) []
deriveSafeCopySorted 8 'base ''Category_v8
instance A.ToJSON Category where
toJSON = A.genericToJSON A.defaultOptions {
A.fieldLabelModifier = over _head toLower . drop (T.length "_category") }
-- | Category identifier (used in URLs). E.g. for a category with title
-- “Performance optimization” and UID “t3c9hwzo” the slug would be
-- @performance-optimization-t3c9hwzo@.
categorySlug :: Category -> Text
categorySlug category =
format "{}-{}" (makeSlug (category^.title)) (category^.uid)
----------------------------------------------------------------------------
-- Utils
----------------------------------------------------------------------------
-- | A useful predicate; @hasUid x@ compares given object's UID with @x@.
hasUid :: HasUid a (Uid u) => Uid u -> a -> Bool
hasUid u x = x^.uid == u
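-- As a sketch of typical use (assuming some @someUid :: Uid Item@ and a
-- @category :: Category@ in scope):
--
-- > filter (hasUid someUid) (category ^. items)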
|
aelve/hslibs
|
src/Guide/Types/Core.hs
|
Haskell
|
bsd-3-clause
| 9,043
|
-- | Testing what work is shared by GHC. In general you can't assume
-- work will be shared. Sometimes GHC will do CSE and top level
-- floating to share work, but not for, say, papLots, which really requires
-- some partial-evaluation kind of work (e.g. Max's super evaluator).
module Main where
papLots :: [Double] -> Double -> Double
papLots xs n = n * sum'
where sum' = foldl ((+) . cos . tan . cos . tan . cos . sin . cos) 0 xs
main :: IO ()
main = do
let papShared = papLots [1..5000000]
print "Starting..."
print "First run..."
print $ papLots [1..5000000] 2
print "Second run..."
print $ papLots [1..5000000] 4
print "Third run..."
print $ papShared 3
print "Fourth run..."
print $ papShared 5
|
dterei/Scraps
|
haskell/PapWorkShare.hs
|
Haskell
|
bsd-3-clause
| 745
|
{-# LANGUAGE CPP #-}
#if __GLASGOW_HASKELL__
{-# LANGUAGE MagicHash, UnboxedTuples #-}
#endif
{-# OPTIONS_HADDOCK prune #-}
#if __GLASGOW_HASKELL__ >= 701
{-# LANGUAGE Trustworthy #-}
#endif
-- |
-- Module : Data.ByteString.Char8
-- Copyright : (c) Don Stewart 2006-2008
-- (c) Duncan Coutts 2006-2011
-- License : BSD-style
--
-- Maintainer : dons00@gmail.com, duncan@community.haskell.org
-- Stability : stable
-- Portability : portable
--
-- Manipulate 'ByteString's using 'Char' operations. All Chars will be
-- truncated to 8 bits. It can be expected that these functions will run
-- at identical speeds to their 'Word8' equivalents in "Data.ByteString".
--
-- More specifically these byte strings are taken to be in the
-- subset of Unicode covered by code points 0-255. This covers
-- Unicode Basic Latin, Latin-1 Supplement and C0+C1 Controls.
--
-- See:
--
-- * <http://www.unicode.org/charts/>
--
-- * <http://www.unicode.org/charts/PDF/U0000.pdf>
--
-- * <http://www.unicode.org/charts/PDF/U0080.pdf>
--
-- This module is intended to be imported @qualified@, to avoid name
-- clashes with "Prelude" functions. eg.
--
-- > import qualified Data.ByteString.Char8 as C
--
-- The Char8 interface to bytestrings provides an instance of IsString
-- for the ByteString type, enabling you to use string literals, and
-- have them implicitly packed to ByteStrings.
-- Use @{-\# LANGUAGE OverloadedStrings \#-}@ to enable this.
--
module Data.ByteString.Char8 (
-- * The @ByteString@ type
ByteString, -- abstract, instances: Eq, Ord, Show, Read, Data, Typeable, Monoid
-- * Introducing and eliminating 'ByteString's
empty, -- :: ByteString
singleton, -- :: Char -> ByteString
pack, -- :: String -> ByteString
unpack, -- :: ByteString -> String
-- * Basic interface
cons, -- :: Char -> ByteString -> ByteString
snoc, -- :: ByteString -> Char -> ByteString
append, -- :: ByteString -> ByteString -> ByteString
head, -- :: ByteString -> Char
uncons, -- :: ByteString -> Maybe (Char, ByteString)
unsnoc, -- :: ByteString -> Maybe (ByteString, Char)
last, -- :: ByteString -> Char
tail, -- :: ByteString -> ByteString
init, -- :: ByteString -> ByteString
null, -- :: ByteString -> Bool
length, -- :: ByteString -> Int
        -- * Transforming ByteStrings
map, -- :: (Char -> Char) -> ByteString -> ByteString
reverse, -- :: ByteString -> ByteString
intersperse, -- :: Char -> ByteString -> ByteString
intercalate, -- :: ByteString -> [ByteString] -> ByteString
transpose, -- :: [ByteString] -> [ByteString]
-- * Reducing 'ByteString's (folds)
foldl, -- :: (a -> Char -> a) -> a -> ByteString -> a
foldl', -- :: (a -> Char -> a) -> a -> ByteString -> a
foldl1, -- :: (Char -> Char -> Char) -> ByteString -> Char
foldl1', -- :: (Char -> Char -> Char) -> ByteString -> Char
foldr, -- :: (Char -> a -> a) -> a -> ByteString -> a
foldr', -- :: (Char -> a -> a) -> a -> ByteString -> a
foldr1, -- :: (Char -> Char -> Char) -> ByteString -> Char
foldr1', -- :: (Char -> Char -> Char) -> ByteString -> Char
-- ** Special folds
concat, -- :: [ByteString] -> ByteString
concatMap, -- :: (Char -> ByteString) -> ByteString -> ByteString
any, -- :: (Char -> Bool) -> ByteString -> Bool
all, -- :: (Char -> Bool) -> ByteString -> Bool
maximum, -- :: ByteString -> Char
minimum, -- :: ByteString -> Char
-- * Building ByteStrings
-- ** Scans
scanl, -- :: (Char -> Char -> Char) -> Char -> ByteString -> ByteString
scanl1, -- :: (Char -> Char -> Char) -> ByteString -> ByteString
scanr, -- :: (Char -> Char -> Char) -> Char -> ByteString -> ByteString
scanr1, -- :: (Char -> Char -> Char) -> ByteString -> ByteString
-- ** Accumulating maps
mapAccumL, -- :: (acc -> Char -> (acc, Char)) -> acc -> ByteString -> (acc, ByteString)
mapAccumR, -- :: (acc -> Char -> (acc, Char)) -> acc -> ByteString -> (acc, ByteString)
-- ** Generating and unfolding ByteStrings
replicate, -- :: Int -> Char -> ByteString
unfoldr, -- :: (a -> Maybe (Char, a)) -> a -> ByteString
unfoldrN, -- :: Int -> (a -> Maybe (Char, a)) -> a -> (ByteString, Maybe a)
-- * Substrings
-- ** Breaking strings
take, -- :: Int -> ByteString -> ByteString
drop, -- :: Int -> ByteString -> ByteString
splitAt, -- :: Int -> ByteString -> (ByteString, ByteString)
takeWhile, -- :: (Char -> Bool) -> ByteString -> ByteString
dropWhile, -- :: (Char -> Bool) -> ByteString -> ByteString
span, -- :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
spanEnd, -- :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
break, -- :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
breakEnd, -- :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
group, -- :: ByteString -> [ByteString]
groupBy, -- :: (Char -> Char -> Bool) -> ByteString -> [ByteString]
inits, -- :: ByteString -> [ByteString]
tails, -- :: ByteString -> [ByteString]
-- ** Breaking into many substrings
split, -- :: Char -> ByteString -> [ByteString]
splitWith, -- :: (Char -> Bool) -> ByteString -> [ByteString]
-- ** Breaking into lines and words
lines, -- :: ByteString -> [ByteString]
words, -- :: ByteString -> [ByteString]
unlines, -- :: [ByteString] -> ByteString
        unwords,                -- :: [ByteString] -> ByteString
-- * Predicates
isPrefixOf, -- :: ByteString -> ByteString -> Bool
isSuffixOf, -- :: ByteString -> ByteString -> Bool
isInfixOf, -- :: ByteString -> ByteString -> Bool
-- ** Search for arbitrary substrings
breakSubstring, -- :: ByteString -> ByteString -> (ByteString,ByteString)
findSubstring, -- :: ByteString -> ByteString -> Maybe Int
findSubstrings, -- :: ByteString -> ByteString -> [Int]
-- * Searching ByteStrings
-- ** Searching by equality
elem, -- :: Char -> ByteString -> Bool
notElem, -- :: Char -> ByteString -> Bool
-- ** Searching with a predicate
find, -- :: (Char -> Bool) -> ByteString -> Maybe Char
filter, -- :: (Char -> Bool) -> ByteString -> ByteString
-- partition -- :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
-- * Indexing ByteStrings
index, -- :: ByteString -> Int -> Char
elemIndex, -- :: Char -> ByteString -> Maybe Int
elemIndices, -- :: Char -> ByteString -> [Int]
elemIndexEnd, -- :: Char -> ByteString -> Maybe Int
findIndex, -- :: (Char -> Bool) -> ByteString -> Maybe Int
findIndices, -- :: (Char -> Bool) -> ByteString -> [Int]
count, -- :: Char -> ByteString -> Int
-- * Zipping and unzipping ByteStrings
zip, -- :: ByteString -> ByteString -> [(Char,Char)]
zipWith, -- :: (Char -> Char -> c) -> ByteString -> ByteString -> [c]
unzip, -- :: [(Char,Char)] -> (ByteString,ByteString)
-- * Ordered ByteStrings
sort, -- :: ByteString -> ByteString
-- * Reading from ByteStrings
readInt, -- :: ByteString -> Maybe (Int, ByteString)
readInteger, -- :: ByteString -> Maybe (Integer, ByteString)
-- * Low level CString conversions
-- ** Copying ByteStrings
copy, -- :: ByteString -> ByteString
-- ** Packing CStrings and pointers
packCString, -- :: CString -> IO ByteString
packCStringLen, -- :: CStringLen -> IO ByteString
-- ** Using ByteStrings as CStrings
useAsCString, -- :: ByteString -> (CString -> IO a) -> IO a
useAsCStringLen, -- :: ByteString -> (CStringLen -> IO a) -> IO a
-- * I\/O with 'ByteString's
-- | ByteString I/O uses binary mode, without any character decoding
-- or newline conversion. The fact that it does not respect the Handle
-- newline mode is considered a flaw and may be changed in a future version.
-- ** Standard input and output
getLine, -- :: IO ByteString
getContents, -- :: IO ByteString
putStr, -- :: ByteString -> IO ()
putStrLn, -- :: ByteString -> IO ()
interact, -- :: (ByteString -> ByteString) -> IO ()
-- ** Files
readFile, -- :: FilePath -> IO ByteString
writeFile, -- :: FilePath -> ByteString -> IO ()
appendFile, -- :: FilePath -> ByteString -> IO ()
-- mmapFile, -- :: FilePath -> IO ByteString
-- ** I\/O with Handles
hGetLine, -- :: Handle -> IO ByteString
hGetContents, -- :: Handle -> IO ByteString
hGet, -- :: Handle -> Int -> IO ByteString
hGetSome, -- :: Handle -> Int -> IO ByteString
hGetNonBlocking, -- :: Handle -> Int -> IO ByteString
hPut, -- :: Handle -> ByteString -> IO ()
hPutNonBlocking, -- :: Handle -> ByteString -> IO ByteString
hPutStr, -- :: Handle -> ByteString -> IO ()
hPutStrLn, -- :: Handle -> ByteString -> IO ()
) where
import qualified Prelude as P
import Prelude hiding (reverse,head,tail,last,init,null
,length,map,lines,foldl,foldr,unlines
,concat,any,take,drop,splitAt,takeWhile
,dropWhile,span,break,elem,filter,unwords
,words,maximum,minimum,all,concatMap
,scanl,scanl1,scanr,scanr1
,appendFile,readFile,writeFile
,foldl1,foldr1,replicate
,getContents,getLine,putStr,putStrLn,interact
,zip,zipWith,unzip,notElem)
import qualified Data.ByteString as B
import qualified Data.ByteString.Internal as B
import qualified Data.ByteString.Unsafe as B
-- Listy functions transparently exported
import Data.ByteString (empty,null,length,tail,init,append
,inits,tails,reverse,transpose
,concat,take,drop,splitAt,intercalate
,sort,isPrefixOf,isSuffixOf,isInfixOf
,findSubstring,findSubstrings,breakSubstring,copy,group
,getLine, getContents, putStr, interact
,hGetContents, hGet, hGetSome, hPut, hPutStr
,hGetLine, hGetNonBlocking, hPutNonBlocking
,packCString,packCStringLen
,useAsCString,useAsCStringLen
)
import Data.ByteString.Internal
import Data.Char ( isSpace )
import qualified Data.List as List (intersperse)
import System.IO (Handle,stdout,openBinaryFile,hClose,hFileSize,IOMode(..))
#ifndef __NHC__
import Control.Exception (bracket)
#else
import IO (bracket)
#endif
import Foreign
#define STRICT1(f) f a | a `seq` False = undefined
#define STRICT2(f) f a b | a `seq` b `seq` False = undefined
#define STRICT3(f) f a b c | a `seq` b `seq` c `seq` False = undefined
#define STRICT4(f) f a b c d | a `seq` b `seq` c `seq` d `seq` False = undefined
------------------------------------------------------------------------
-- | /O(1)/ Convert a 'Char' into a 'ByteString'
singleton :: Char -> ByteString
singleton = B.singleton . c2w
{-# INLINE singleton #-}
-- | /O(n)/ Convert a 'String' into a 'ByteString'
--
-- For applications with large numbers of string literals, pack can be a
-- bottleneck.
pack :: String -> ByteString
pack = packChars
#if !defined(__GLASGOW_HASKELL__)
{-# INLINE [1] pack #-}
{-# RULES
"ByteString pack/packAddress" forall s .
pack (unpackCString# s) = inlinePerformIO (B.unsafePackAddress s)
#-}
#endif
-- | /O(n)/ Converts a 'ByteString' to a 'String'.
unpack :: ByteString -> [Char]
unpack = B.unpackChars
{-# INLINE unpack #-}
infixr 5 `cons` --same as list (:)
infixl 5 `snoc`
-- | /O(n)/ 'cons' is analogous to (:) for lists, but of different
-- complexity, as it requires a memcpy.
cons :: Char -> ByteString -> ByteString
cons = B.cons . c2w
{-# INLINE cons #-}
-- | /O(n)/ Append a Char to the end of a 'ByteString'. Similar to
-- 'cons', this function performs a memcpy.
snoc :: ByteString -> Char -> ByteString
snoc p = B.snoc p . c2w
{-# INLINE snoc #-}
-- | /O(1)/ Extract the head and tail of a ByteString, returning Nothing
-- if it is empty.
uncons :: ByteString -> Maybe (Char, ByteString)
uncons bs = case B.uncons bs of
Nothing -> Nothing
Just (w, bs') -> Just (w2c w, bs')
{-# INLINE uncons #-}
-- | /O(1)/ Extract the 'init' and 'last' of a ByteString, returning Nothing
-- if it is empty.
unsnoc :: ByteString -> Maybe (ByteString, Char)
unsnoc bs = case B.unsnoc bs of
Nothing -> Nothing
Just (bs', w) -> Just (bs', w2c w)
{-# INLINE unsnoc #-}
-- | /O(1)/ Extract the first element of a ByteString, which must be non-empty.
head :: ByteString -> Char
head = w2c . B.head
{-# INLINE head #-}
-- | /O(1)/ Extract the last element of a packed string, which must be non-empty.
last :: ByteString -> Char
last = w2c . B.last
{-# INLINE last #-}
-- | /O(n)/ 'map' @f xs@ is the ByteString obtained by applying @f@ to each element of @xs@
map :: (Char -> Char) -> ByteString -> ByteString
map f = B.map (c2w . f . w2c)
{-# INLINE map #-}
-- | /O(n)/ The 'intersperse' function takes a Char and a 'ByteString'
-- and \`intersperses\' that Char between the elements of the
-- 'ByteString'. It is analogous to the intersperse function on Lists.
intersperse :: Char -> ByteString -> ByteString
intersperse = B.intersperse . c2w
{-# INLINE intersperse #-}
-- | 'foldl', applied to a binary operator, a starting value (typically
-- the left-identity of the operator), and a ByteString, reduces the
-- ByteString using the binary operator, from left to right.
foldl :: (a -> Char -> a) -> a -> ByteString -> a
foldl f = B.foldl (\a c -> f a (w2c c))
{-# INLINE foldl #-}
-- | 'foldl\'' is like foldl, but strict in the accumulator.
foldl' :: (a -> Char -> a) -> a -> ByteString -> a
foldl' f = B.foldl' (\a c -> f a (w2c c))
{-# INLINE foldl' #-}
-- | 'foldr', applied to a binary operator, a starting value
-- (typically the right-identity of the operator), and a packed string,
-- reduces the packed string using the binary operator, from right to left.
foldr :: (Char -> a -> a) -> a -> ByteString -> a
foldr f = B.foldr (\c a -> f (w2c c) a)
{-# INLINE foldr #-}
-- | 'foldr\'' is a strict variant of foldr
foldr' :: (Char -> a -> a) -> a -> ByteString -> a
foldr' f = B.foldr' (\c a -> f (w2c c) a)
{-# INLINE foldr' #-}
-- | 'foldl1' is a variant of 'foldl' that has no starting value
-- argument, and thus must be applied to non-empty 'ByteStrings'.
foldl1 :: (Char -> Char -> Char) -> ByteString -> Char
foldl1 f ps = w2c (B.foldl1 (\x y -> c2w (f (w2c x) (w2c y))) ps)
{-# INLINE foldl1 #-}
-- | A strict version of 'foldl1'
foldl1' :: (Char -> Char -> Char) -> ByteString -> Char
foldl1' f ps = w2c (B.foldl1' (\x y -> c2w (f (w2c x) (w2c y))) ps)
{-# INLINE foldl1' #-}
-- | 'foldr1' is a variant of 'foldr' that has no starting value argument,
-- and thus must be applied to non-empty 'ByteString's
foldr1 :: (Char -> Char -> Char) -> ByteString -> Char
foldr1 f ps = w2c (B.foldr1 (\x y -> c2w (f (w2c x) (w2c y))) ps)
{-# INLINE foldr1 #-}
-- | A strict variant of foldr1
foldr1' :: (Char -> Char -> Char) -> ByteString -> Char
foldr1' f ps = w2c (B.foldr1' (\x y -> c2w (f (w2c x) (w2c y))) ps)
{-# INLINE foldr1' #-}
-- | Map a function over a 'ByteString' and concatenate the results
concatMap :: (Char -> ByteString) -> ByteString -> ByteString
concatMap f = B.concatMap (f . w2c)
{-# INLINE concatMap #-}
-- | Applied to a predicate and a ByteString, 'any' determines if
-- any element of the 'ByteString' satisfies the predicate.
any :: (Char -> Bool) -> ByteString -> Bool
any f = B.any (f . w2c)
{-# INLINE any #-}
-- | Applied to a predicate and a 'ByteString', 'all' determines if
-- all elements of the 'ByteString' satisfy the predicate.
all :: (Char -> Bool) -> ByteString -> Bool
all f = B.all (f . w2c)
{-# INLINE all #-}
-- | 'maximum' returns the maximum value from a 'ByteString'
maximum :: ByteString -> Char
maximum = w2c . B.maximum
{-# INLINE maximum #-}
-- | 'minimum' returns the minimum value from a 'ByteString'
minimum :: ByteString -> Char
minimum = w2c . B.minimum
{-# INLINE minimum #-}
-- | The 'mapAccumL' function behaves like a combination of 'map' and
-- 'foldl'; it applies a function to each element of a ByteString,
-- passing an accumulating parameter from left to right, and returning a
-- final value of this accumulator together with the new list.
mapAccumL :: (acc -> Char -> (acc, Char)) -> acc -> ByteString -> (acc, ByteString)
mapAccumL f = B.mapAccumL (\acc w -> case f acc (w2c w) of (acc', c) -> (acc', c2w c))
-- | The 'mapAccumR' function behaves like a combination of 'map' and
-- 'foldr'; it applies a function to each element of a ByteString,
-- passing an accumulating parameter from right to left, and returning a
-- final value of this accumulator together with the new ByteString.
mapAccumR :: (acc -> Char -> (acc, Char)) -> acc -> ByteString -> (acc, ByteString)
mapAccumR f = B.mapAccumR (\acc w -> case f acc (w2c w) of (acc', c) -> (acc', c2w c))
-- | 'scanl' is similar to 'foldl', but returns a list of successive
-- reduced values from the left:
--
-- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...]
--
-- Note that
--
-- > last (scanl f z xs) == foldl f z xs.
scanl :: (Char -> Char -> Char) -> Char -> ByteString -> ByteString
scanl f z = B.scanl (\a b -> c2w (f (w2c a) (w2c b))) (c2w z)
-- | 'scanl1' is a variant of 'scanl' that has no starting value argument:
--
-- > scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...]
scanl1 :: (Char -> Char -> Char) -> ByteString -> ByteString
scanl1 f = B.scanl1 (\a b -> c2w (f (w2c a) (w2c b)))
-- | scanr is the right-to-left dual of scanl.
scanr :: (Char -> Char -> Char) -> Char -> ByteString -> ByteString
scanr f z = B.scanr (\a b -> c2w (f (w2c a) (w2c b))) (c2w z)
-- | 'scanr1' is a variant of 'scanr' that has no starting value argument.
scanr1 :: (Char -> Char -> Char) -> ByteString -> ByteString
scanr1 f = B.scanr1 (\a b -> c2w (f (w2c a) (w2c b)))
-- | /O(n)/ 'replicate' @n x@ is a ByteString of length @n@ with @x@
-- the value of every element. The following holds:
--
-- > replicate w c = unfoldr w (\u -> Just (u,u)) c
--
-- This implementation uses @memset(3)@
replicate :: Int -> Char -> ByteString
replicate w = B.replicate w . c2w
{-# INLINE replicate #-}
-- | /O(n)/, where /n/ is the length of the result. The 'unfoldr'
-- function is analogous to the List \'unfoldr\'. 'unfoldr' builds a
-- ByteString from a seed value. The function takes the element and
-- returns 'Nothing' if it is done producing the ByteString or returns
-- 'Just' @(a,b)@, in which case, @a@ is the next character in the string,
-- and @b@ is the seed value for further production.
--
-- Examples:
--
-- > unfoldr (\x -> if x <= '9' then Just (x, succ x) else Nothing) '0' == "0123456789"
unfoldr :: (a -> Maybe (Char, a)) -> a -> ByteString
unfoldr f x0 = B.unfoldr (fmap k . f) x0
where k (i, j) = (c2w i, j)
-- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a ByteString from a seed
-- value. However, the length of the result is limited by the first
-- argument to 'unfoldrN'. This function is more efficient than 'unfoldr'
-- when the maximum length of the result is known.
--
-- The following equation relates 'unfoldrN' and 'unfoldr':
--
-- > unfoldrN n f s == take n (unfoldr f s)
unfoldrN :: Int -> (a -> Maybe (Char, a)) -> a -> (ByteString, Maybe a)
unfoldrN n f w = B.unfoldrN n ((k `fmap`) . f) w
where k (i,j) = (c2w i, j)
{-# INLINE unfoldrN #-}
-- | 'takeWhile', applied to a predicate @p@ and a ByteString @xs@,
-- returns the longest prefix (possibly empty) of @xs@ of elements that
-- satisfy @p@.
takeWhile :: (Char -> Bool) -> ByteString -> ByteString
takeWhile f = B.takeWhile (f . w2c)
{-# INLINE takeWhile #-}
-- | 'dropWhile' @p xs@ returns the suffix remaining after 'takeWhile' @p xs@.
dropWhile :: (Char -> Bool) -> ByteString -> ByteString
dropWhile f = B.dropWhile (f . w2c)
#if defined(__GLASGOW_HASKELL__)
{-# INLINE [1] dropWhile #-}
#endif
{-# RULES
"ByteString specialise dropWhile isSpace -> dropSpace"
dropWhile isSpace = dropSpace
#-}
-- | 'break' @p@ is equivalent to @'span' ('not' . p)@.
break :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
break f = B.break (f . w2c)
#if defined(__GLASGOW_HASKELL__)
{-# INLINE [1] break #-}
#endif
{-# RULES
"ByteString specialise break (x==)" forall x.
break ((==) x) = breakChar x
"ByteString specialise break (==x)" forall x.
break (==x) = breakChar x
#-}
-- INTERNAL:
-- | 'breakChar' breaks its ByteString argument at the first occurrence
-- of the specified char. It is more efficient than 'break' as it is
-- implemented with @memchr(3)@. I.e.
--
-- > break (=='c') "abcd" == breakChar 'c' "abcd"
--
breakChar :: Char -> ByteString -> (ByteString, ByteString)
breakChar c p = case elemIndex c p of
Nothing -> (p,empty)
Just n -> (B.unsafeTake n p, B.unsafeDrop n p)
{-# INLINE breakChar #-}
-- | 'span' @p xs@ breaks the ByteString into two segments. It is
-- equivalent to @('takeWhile' p xs, 'dropWhile' p xs)@
span :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
span f = B.span (f . w2c)
{-# INLINE span #-}
-- | 'spanEnd' behaves like 'span' but from the end of the 'ByteString'.
-- We have
--
-- > spanEnd (not.isSpace) "x y z" == ("x y ","z")
--
-- and
--
-- > spanEnd (not . isSpace) ps
-- > ==
-- > let (x,y) = span (not.isSpace) (reverse ps) in (reverse y, reverse x)
--
spanEnd :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
spanEnd f = B.spanEnd (f . w2c)
{-# INLINE spanEnd #-}
-- | 'breakEnd' behaves like 'break' but from the end of the 'ByteString'
--
-- breakEnd p == spanEnd (not.p)
breakEnd :: (Char -> Bool) -> ByteString -> (ByteString, ByteString)
breakEnd f = B.breakEnd (f . w2c)
{-# INLINE breakEnd #-}
{-
-- | 'breakChar' breaks its ByteString argument at the first occurrence
-- of the specified Char. It is more efficient than 'break' as it is
-- implemented with @memchr(3)@. I.e.
--
-- > break (=='c') "abcd" == breakChar 'c' "abcd"
--
breakChar :: Char -> ByteString -> (ByteString, ByteString)
breakChar = B.breakByte . c2w
{-# INLINE breakChar #-}
-- | 'spanChar' breaks its ByteString argument at the first
-- occurrence of a Char other than its argument. It is more efficient
-- than 'span (==)'
--
-- > span (=='c') "abcd" == spanByte 'c' "abcd"
--
spanChar :: Char -> ByteString -> (ByteString, ByteString)
spanChar = B.spanByte . c2w
{-# INLINE spanChar #-}
-}
-- | /O(n)/ Break a 'ByteString' into pieces separated by the byte
-- argument, consuming the delimiter. I.e.
--
-- > split '\n' "a\nb\nd\ne" == ["a","b","d","e"]
-- > split 'a' "aXaXaXa" == ["","X","X","X",""]
-- > split 'x' "x" == ["",""]
--
-- and
--
-- > intercalate [c] . split c == id
-- > split == splitWith . (==)
--
-- As for all splitting functions in this library, this function does
-- not copy the substrings, it just constructs new 'ByteStrings' that
-- are slices of the original.
--
split :: Char -> ByteString -> [ByteString]
split = B.split . c2w
{-# INLINE split #-}
-- | /O(n)/ Splits a 'ByteString' into components delimited by
-- separators, where the predicate returns True for a separator element.
-- The resulting components do not contain the separators. Two adjacent
-- separators result in an empty component in the output. eg.
--
-- > splitWith (=='a') "aabbaca" == ["","","bb","c",""]
--
splitWith :: (Char -> Bool) -> ByteString -> [ByteString]
splitWith f = B.splitWith (f . w2c)
{-# INLINE splitWith #-}
-- the inline makes a big difference here.
{-
-- | Like 'splitWith', except that sequences of adjacent separators are
-- treated as a single separator. eg.
--
-- > tokens (=='a') "aabbaca" == ["bb","c"]
--
tokens :: (Char -> Bool) -> ByteString -> [ByteString]
tokens f = B.tokens (f . w2c)
{-# INLINE tokens #-}
-}
-- | The 'groupBy' function is the non-overloaded version of 'group'.
groupBy :: (Char -> Char -> Bool) -> ByteString -> [ByteString]
groupBy k = B.groupBy (\a b -> k (w2c a) (w2c b))
-- | /O(1)/ 'ByteString' index (subscript) operator, starting from 0.
index :: ByteString -> Int -> Char
index = (w2c .) . B.index
{-# INLINE index #-}
-- | /O(n)/ The 'elemIndex' function returns the index of the first
-- element in the given 'ByteString' which is equal (by memchr) to the
-- query element, or 'Nothing' if there is no such element.
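--
-- For example (illustrative examples, not from the original docs):
--
-- > elemIndex 'o' "foo bar" == Just 1
-- > elemIndex 'z' "foo bar" == Nothing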
elemIndex :: Char -> ByteString -> Maybe Int
elemIndex = B.elemIndex . c2w
{-# INLINE elemIndex #-}
-- | /O(n)/ The 'elemIndexEnd' function returns the last index of the
-- element in the given 'ByteString' which is equal to the query
-- element, or 'Nothing' if there is no such element. The following
-- holds:
--
-- > elemIndexEnd c xs ==
-- > (-) (length xs - 1) `fmap` elemIndex c (reverse xs)
--
elemIndexEnd :: Char -> ByteString -> Maybe Int
elemIndexEnd = B.elemIndexEnd . c2w
{-# INLINE elemIndexEnd #-}
-- | /O(n)/ The 'elemIndices' function extends 'elemIndex', by returning
-- the indices of all elements equal to the query element, in ascending order.
elemIndices :: Char -> ByteString -> [Int]
elemIndices = B.elemIndices . c2w
{-# INLINE elemIndices #-}
-- | The 'findIndex' function takes a predicate and a 'ByteString' and
-- returns the index of the first element in the ByteString satisfying the predicate.
findIndex :: (Char -> Bool) -> ByteString -> Maybe Int
findIndex f = B.findIndex (f . w2c)
{-# INLINE findIndex #-}
-- | The 'findIndices' function extends 'findIndex', by returning the
-- indices of all elements satisfying the predicate, in ascending order.
findIndices :: (Char -> Bool) -> ByteString -> [Int]
findIndices f = B.findIndices (f . w2c)
-- | count returns the number of times its argument appears in the ByteString
--
-- > count = length . elemIndices
--
-- Also
--
-- > count '\n' == length . lines
--
-- but it is computed more efficiently than by using 'length' on the intermediate list.
count :: Char -> ByteString -> Int
count c = B.count (c2w c)
-- | /O(n)/ 'elem' is the 'ByteString' membership predicate. This
-- implementation uses @memchr(3)@.
elem :: Char -> ByteString -> Bool
elem c = B.elem (c2w c)
{-# INLINE elem #-}
-- | /O(n)/ 'notElem' is the inverse of 'elem'
notElem :: Char -> ByteString -> Bool
notElem c = B.notElem (c2w c)
{-# INLINE notElem #-}
-- | /O(n)/ 'filter', applied to a predicate and a ByteString,
-- returns a ByteString containing those characters that satisfy the
-- predicate.
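--
-- For example (an illustrative example, not from the original docs):
--
-- > filter (== 'a') "banana" == "aaa"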
filter :: (Char -> Bool) -> ByteString -> ByteString
filter f = B.filter (f . w2c)
{-# INLINE filter #-}
{-
-- | /O(n)/ and /O(n\/c) space/ A first order equivalent of /filter .
-- (==)/, for the common case of filtering a single Char. It is more
-- efficient to use /filterChar/ in this case.
--
-- > filterChar == filter . (==)
--
-- filterChar is around 10x faster, and uses much less space, than its
-- filter equivalent
--
filterChar :: Char -> ByteString -> ByteString
filterChar c ps = replicate (count c ps) c
{-# INLINE filterChar #-}
{-# RULES
"ByteString specialise filter (== x)" forall x.
filter ((==) x) = filterChar x
"ByteString specialise filter (== x)" forall x.
filter (== x) = filterChar x
#-}
-}
-- | /O(n)/ The 'find' function takes a predicate and a ByteString,
-- and returns the first element matching the predicate, or 'Nothing'
-- if there is no such element.
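--
-- For example (an illustrative example, not from the original docs):
--
-- > find (> 'a') "abc" == Just 'b'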
find :: (Char -> Bool) -> ByteString -> Maybe Char
find f ps = w2c `fmap` B.find (f . w2c) ps
{-# INLINE find #-}
{-
-- | /O(n)/ A first order equivalent of /filter . (==)/, for the common
-- case of filtering a single Char. It is more efficient to use
-- filterChar in this case.
--
-- > filterChar == filter . (==)
--
-- filterChar is around 10x faster, and uses much less space, than its
-- filter equivalent
--
filterChar :: Char -> ByteString -> ByteString
filterChar c = B.filterByte (c2w c)
{-# INLINE filterChar #-}
-- | /O(n)/ A first order equivalent of /filter . (\/=)/, for the common
-- case of filtering a single Char out of a list. It is more efficient
-- to use /filterNotChar/ in this case.
--
-- > filterNotChar == filter . (/=)
--
-- filterNotChar is around 3x faster, and uses much less space, than its
-- filter equivalent
--
filterNotChar :: Char -> ByteString -> ByteString
filterNotChar c = B.filterNotByte (c2w c)
{-# INLINE filterNotChar #-}
-}
-- | /O(n)/ 'zip' takes two ByteStrings and returns a list of
-- corresponding pairs of Chars. If one input ByteString is short,
-- excess elements of the longer ByteString are discarded. This is
-- equivalent to a pair of 'unpack' operations, and so space
-- usage may be large for multi-megabyte ByteStrings
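--
-- For example (an illustrative example, not from the original docs):
--
-- > zip "abc" "xy" == [('a','x'),('b','y')]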
zip :: ByteString -> ByteString -> [(Char,Char)]
zip ps qs
| B.null ps || B.null qs = []
| otherwise = (unsafeHead ps, unsafeHead qs) : zip (B.unsafeTail ps) (B.unsafeTail qs)
-- | 'zipWith' generalises 'zip' by zipping with the function given as
-- the first argument, instead of a tupling function. For example,
-- @'zipWith' (+)@ is applied to two ByteStrings to produce the list
-- of corresponding sums.
zipWith :: (Char -> Char -> a) -> ByteString -> ByteString -> [a]
zipWith f = B.zipWith ((. w2c) . f . w2c)
-- | 'unzip' transforms a list of pairs of Chars into a pair of
-- ByteStrings. Note that this performs two 'pack' operations.
unzip :: [(Char,Char)] -> (ByteString,ByteString)
unzip ls = (pack (P.map fst ls), pack (P.map snd ls))
{-# INLINE unzip #-}
-- | A variant of 'head' for non-empty ByteStrings. 'unsafeHead' omits
-- the check for the empty case, which is good for performance, but
-- there is an obligation on the programmer to provide a proof that the
-- ByteString is non-empty.
unsafeHead :: ByteString -> Char
unsafeHead = w2c . B.unsafeHead
{-# INLINE unsafeHead #-}
-- ---------------------------------------------------------------------
-- Things that depend on the encoding
{-# RULES
"ByteString specialise break -> breakSpace"
break isSpace = breakSpace
#-}
-- | 'breakSpace' returns the pair of ByteStrings when the argument is
-- broken at the first whitespace byte. I.e.
--
-- > break isSpace == breakSpace
--
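-- For example (an illustrative example, not from the original docs):
--
-- > breakSpace "hello world" == ("hello", " world")
--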
breakSpace :: ByteString -> (ByteString,ByteString)
breakSpace (PS x s l) = inlinePerformIO $ withForeignPtr x $ \p -> do
i <- firstspace (p `plusPtr` s) 0 l
return $! case () of {_
| i == 0 -> (empty, PS x s l)
| i == l -> (PS x s l, empty)
| otherwise -> (PS x s i, PS x (s+i) (l-i))
}
{-# INLINE breakSpace #-}
firstspace :: Ptr Word8 -> Int -> Int -> IO Int
STRICT3(firstspace)
firstspace ptr n m
| n >= m = return n
| otherwise = do w <- peekByteOff ptr n
if (not . isSpaceWord8) w then firstspace ptr (n+1) m else return n
-- | 'dropSpace' efficiently returns the 'ByteString' argument with
-- white space Chars removed from the front. It is more efficient than
-- calling dropWhile for removing whitespace. I.e.
--
-- > dropWhile isSpace == dropSpace
--
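-- For example (an illustrative example, not from the original docs):
--
-- > dropSpace "   abc" == "abc"
--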
dropSpace :: ByteString -> ByteString
dropSpace (PS x s l) = inlinePerformIO $ withForeignPtr x $ \p -> do
i <- firstnonspace (p `plusPtr` s) 0 l
return $! if i == l then empty else PS x (s+i) (l-i)
{-# INLINE dropSpace #-}
firstnonspace :: Ptr Word8 -> Int -> Int -> IO Int
STRICT3(firstnonspace)
firstnonspace ptr n m
| n >= m = return n
| otherwise = do w <- peekElemOff ptr n
if isSpaceWord8 w then firstnonspace ptr (n+1) m else return n
{-
-- | 'dropSpaceEnd' efficiently returns the 'ByteString' argument with
-- white space removed from the end. I.e.
--
-- > reverse . (dropWhile isSpace) . reverse == dropSpaceEnd
--
-- but it is more efficient than using multiple reverses.
--
dropSpaceEnd :: ByteString -> ByteString
dropSpaceEnd (PS x s l) = inlinePerformIO $ withForeignPtr x $ \p -> do
i <- lastnonspace (p `plusPtr` s) (l-1)
return $! if i == (-1) then empty else PS x s (i+1)
{-# INLINE dropSpaceEnd #-}
lastnonspace :: Ptr Word8 -> Int -> IO Int
STRICT2(lastnonspace)
lastnonspace ptr n
| n < 0 = return n
| otherwise = do w <- peekElemOff ptr n
if isSpaceWord8 w then lastnonspace ptr (n-1) else return n
-}
-- | 'lines' breaks a ByteString up into a list of ByteStrings at
-- newline Chars. The resulting strings do not contain newlines.
--
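-- For example (an illustrative example, not from the original docs):
--
-- > lines "hello\nworld\n" == ["hello","world"]
--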
lines :: ByteString -> [ByteString]
lines ps
| null ps = []
| otherwise = case search ps of
Nothing -> [ps]
Just n -> take n ps : lines (drop (n+1) ps)
where search = elemIndex '\n'
{-
-- Just as fast, but more complex. Should be much faster, I thought.
lines :: ByteString -> [ByteString]
lines (PS _ _ 0) = []
lines (PS x s l) = inlinePerformIO $ withForeignPtr x $ \p -> do
let ptr = p `plusPtr` s
STRICT1(loop)
loop n = do
let q = memchr (ptr `plusPtr` n) 0x0a (fromIntegral (l-n))
if q == nullPtr
then return [PS x (s+n) (l-n)]
else do let i = q `minusPtr` ptr
ls <- loop (i+1)
return $! PS x (s+n) (i-n) : ls
loop 0
-}
-- | 'unlines' is an inverse operation to 'lines'. It joins lines,
-- after appending a terminating newline to each.
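--
-- For example (an illustrative example, not from the original docs):
--
-- > unlines ["hello","world"] == "hello\nworld\n"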
unlines :: [ByteString] -> ByteString
unlines [] = empty
unlines ss = (concat $ List.intersperse nl ss) `append` nl -- half as much space
where nl = singleton '\n'
-- | 'words' breaks a ByteString up into a list of words, which
-- were delimited by Chars representing white space.
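--
-- For example (an illustrative example, not from the original docs):
--
-- > words "  foo bar  " == ["foo","bar"]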
words :: ByteString -> [ByteString]
words = P.filter (not . B.null) . B.splitWith isSpaceWord8
{-# INLINE words #-}
-- | The 'unwords' function is analogous to the 'unlines' function, on words.
unwords :: [ByteString] -> ByteString
unwords = intercalate (singleton ' ')
{-# INLINE unwords #-}
-- ---------------------------------------------------------------------
-- Reading from ByteStrings
-- | readInt reads an Int from the beginning of the ByteString. If there is no
-- integer at the beginning of the string, it returns Nothing; otherwise
-- it just returns the Int read, and the rest of the string.
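--
-- For example (illustrative examples, not from the original docs):
--
-- > readInt "1729 km" == Just (1729, " km")
-- > readInt "-42"     == Just (-42, "")
-- > readInt "foo"     == Nothing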
readInt :: ByteString -> Maybe (Int, ByteString)
readInt as
| null as = Nothing
| otherwise =
case unsafeHead as of
'-' -> loop True 0 0 (B.unsafeTail as)
'+' -> loop False 0 0 (B.unsafeTail as)
_ -> loop False 0 0 as
where loop :: Bool -> Int -> Int -> ByteString -> Maybe (Int, ByteString)
STRICT4(loop)
loop neg i n ps
| null ps = end neg i n ps
| otherwise =
case B.unsafeHead ps of
w | w >= 0x30
&& w <= 0x39 -> loop neg (i+1)
(n * 10 + (fromIntegral w - 0x30))
(B.unsafeTail ps)
| otherwise -> end neg i n ps
end _ 0 _ _ = Nothing
end True _ n ps = Just (negate n, ps)
end _ _ n ps = Just (n, ps)
-- | readInteger reads an Integer from the beginning of the ByteString. If
-- there is no integer at the beginning of the string, it returns Nothing;
-- otherwise it just returns the Integer read, and the rest of the string.
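--
-- For example (an illustrative example, not from the original docs):
--
-- > readInteger "123456789012345678901234567890!" == Just (123456789012345678901234567890, "!")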
readInteger :: ByteString -> Maybe (Integer, ByteString)
readInteger as
| null as = Nothing
| otherwise =
case unsafeHead as of
'-' -> first (B.unsafeTail as) >>= \(n, bs) -> return (-n, bs)
'+' -> first (B.unsafeTail as)
_ -> first as
where first ps | null ps = Nothing
| otherwise =
case B.unsafeHead ps of
w | w >= 0x30 && w <= 0x39 -> Just $
loop 1 (fromIntegral w - 0x30) [] (B.unsafeTail ps)
| otherwise -> Nothing
loop :: Int -> Int -> [Integer]
-> ByteString -> (Integer, ByteString)
STRICT4(loop)
loop d acc ns ps
| null ps = combine d acc ns empty
| otherwise =
case B.unsafeHead ps of
w | w >= 0x30 && w <= 0x39 ->
if d == 9 then loop 1 (fromIntegral w - 0x30)
(toInteger acc : ns)
(B.unsafeTail ps)
else loop (d+1)
(10*acc + (fromIntegral w - 0x30))
ns (B.unsafeTail ps)
| otherwise -> combine d acc ns ps
combine _ acc [] ps = (toInteger acc, ps)
combine d acc ns ps =
((10^d * combine1 1000000000 ns + toInteger acc), ps)
combine1 _ [n] = n
combine1 b ns = combine1 (b*b) $ combine2 b ns
combine2 b (n:m:ns) = let t = m*b + n in t `seq` (t : combine2 b ns)
combine2 _ ns = ns
------------------------------------------------------------------------
-- For non-binary text processing:
-- | Read an entire file strictly into a 'ByteString'. This is far more
-- efficient than reading the characters into a 'String' and then using
-- 'pack'. It also may be more efficient than opening the file and
-- reading it using hGet.
readFile :: FilePath -> IO ByteString
readFile f = bracket (openBinaryFile f ReadMode) hClose
(\h -> hFileSize h >>= hGet h . fromIntegral)
-- | Write a 'ByteString' to a file.
writeFile :: FilePath -> ByteString -> IO ()
writeFile f txt = bracket (openBinaryFile f WriteMode) hClose
(\h -> hPut h txt)
-- | Append a 'ByteString' to a file.
appendFile :: FilePath -> ByteString -> IO ()
appendFile f txt = bracket (openBinaryFile f AppendMode) hClose
(\h -> hPut h txt)
-- | Write a ByteString to a handle, appending a newline byte
hPutStrLn :: Handle -> ByteString -> IO ()
hPutStrLn h ps
| length ps < 1024 = hPut h (ps `B.snoc` 0x0a)
| otherwise = hPut h ps >> hPut h (B.singleton (0x0a)) -- don't copy
-- | Write a ByteString to stdout, appending a newline byte
putStrLn :: ByteString -> IO ()
putStrLn = hPutStrLn stdout
|
markflorisson/hpack
|
testrepo/bytestring-0.10.2.0/Data/ByteString/Char8.hs
|
Haskell
|
bsd-3-clause
| 40,569
|
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE TemplateHaskell #-}
module Language.LSP.Types.SelectionRange where
import Data.Aeson.TH
import Language.LSP.Types.Common
import Language.LSP.Types.Location
import Language.LSP.Types.Progress
import Language.LSP.Types.StaticRegistrationOptions
import Language.LSP.Types.TextDocument
import Language.LSP.Types.Utils
data SelectionRangeClientCapabilities = SelectionRangeClientCapabilities
{ -- | Whether implementation supports dynamic registration for selection range providers. If this is set to 'True'
-- the client supports the new 'SelectionRangeRegistrationOptions' return value for the corresponding server
-- capability as well.
_dynamicRegistration :: Maybe Bool
}
deriving (Read, Show, Eq)
deriveJSON lspOptions ''SelectionRangeClientCapabilities
makeExtendingDatatype "SelectionRangeOptions" [''WorkDoneProgressOptions] []
deriveJSON lspOptions ''SelectionRangeOptions
makeExtendingDatatype
"SelectionRangeRegistrationOptions"
[ ''SelectionRangeOptions,
''TextDocumentRegistrationOptions,
''StaticRegistrationOptions
]
[]
deriveJSON lspOptions ''SelectionRangeRegistrationOptions
makeExtendingDatatype
"SelectionRangeParams"
[ ''WorkDoneProgressParams,
''PartialResultParams
]
[ ("_textDocument", [t|TextDocumentIdentifier|]),
("_positions", [t|List Position|])
]
deriveJSON lspOptions ''SelectionRangeParams
data SelectionRange = SelectionRange
{ -- | The 'range' of this selection range.
_range :: Range,
-- | The parent selection range containing this range. Therefore @parent.range@ must contain @this.range@.
_parent :: Maybe SelectionRange
}
deriving (Read, Show, Eq)
deriveJSON lspOptions ''SelectionRange
|
alanz/haskell-lsp
|
lsp-types/src/Language/LSP/Types/SelectionRange.hs
|
Haskell
|
mit
| 1,753
|
-- |
-- Description : The convenience module to re-export public definitions
-- Copyright : (c) 2015 Egor Tensin <Egor.Tensin@gmail.com>
-- License : MIT
-- Maintainer : Egor.Tensin@gmail.com
-- Stability : experimental
-- Portability : Windows-only
--
-- An empty module to re-export everything required by the packaged
-- applications.
module WindowsEnv
( module WindowsEnv.Environment
) where
import WindowsEnv.Environment
|
egor-tensin/windows-env
|
src/WindowsEnv.hs
|
Haskell
|
mit
| 445
|
module Data.Wright.Types where
import Data.Vector (Vector(..), vmap)
data XYZ t = XYZ t t t deriving (Show)
data LAB t = LAB t t t deriving (Show)
data RGB t = RGB t t t deriving (Show)
data Yxy t = Yxy t t t deriving (Show) -- "xyY"
type ℝ = Double
data Gamma = Gamma ℝ | LStar | SRGB
data Application = Graphics | Textiles --CIE94
data Model = Model
{ gamma :: Gamma
, white :: XYZ ℝ
, red :: Primary
, green :: Primary
, blue :: Primary
}
data Primary = Primary
{ x :: ℝ
, y :: ℝ
, z :: ℝ
}
instance Vector XYZ where
toVector (XYZ x y z) = (x, y, z)
fromVector = uncurry3 XYZ
instance Functor XYZ where
fmap = vmap
instance Vector LAB where
toVector (LAB l a b) = (l, a, b)
fromVector = uncurry3 LAB
instance Functor LAB where
fmap = vmap
instance Vector RGB where
toVector (RGB r g b) = (r, g, b)
fromVector = uncurry3 RGB
instance Functor RGB where
fmap = vmap
instance Vector Yxy where
toVector (Yxy y' x y) = (y', x, y)
fromVector = uncurry3 Yxy
instance Functor Yxy where
fmap = vmap
uncurry3 :: (a -> b -> c -> d) -> (a, b, c) -> d
uncurry3 f (a, b, c) = f a b c
|
fmap-archive/wright
|
src/Data/Wright/Types.hs
|
Haskell
|
mit
| 1,149
|
module System.LookupEnv (lookupEnv) where
import System.Environment (getEnvironment)
lookupEnv :: String -> IO (Maybe String)
lookupEnv envVar = fmap (lookup envVar) $ getEnvironment
|
yesodweb/clientsession
|
src/System/LookupEnv.hs
|
Haskell
|
mit
| 185
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TemplateHaskell #-}
-- | Perform a build
module Stack.Build.Execute
( printPlan
, preFetch
, executePlan
-- * Running Setup.hs
, ExecuteEnv
, withExecuteEnv
, withSingleContext
) where
import Control.Applicative
import Control.Arrow ((&&&))
import Control.Concurrent.Async (withAsync, wait)
import Control.Concurrent.Execute
import Control.Concurrent.MVar.Lifted
import Control.Concurrent.STM
import Control.Exception.Enclosed (catchIO, tryIO)
import Control.Exception.Lifted
import Control.Monad (liftM, when, unless, void, join, filterM, (<=<))
import Control.Monad.Catch (MonadCatch, MonadMask)
import Control.Monad.IO.Class
import Control.Monad.Logger
import Control.Monad.Reader (MonadReader, asks)
import Control.Monad.Trans.Control (liftBaseWith)
import Control.Monad.Trans.Resource
import Data.Attoparsec.Text
import qualified Data.ByteString as S
import Data.Conduit
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.List as CL
import qualified Data.Conduit.Text as CT
import Data.Either (isRight)
import Data.Foldable (forM_, any)
import Data.Function
import Data.IORef.RunOnce (runOnce)
import Data.List hiding (any)
import Data.Map.Strict (Map)
import qualified Data.Map.Strict as Map
import Data.Maybe
import Data.Maybe.Extra (forMaybeM)
import Data.Monoid ((<>))
import Data.Set (Set)
import qualified Data.Set as Set
import qualified Data.Streaming.Process as Process
import Data.Streaming.Process hiding (callProcess, env)
import Data.Text (Text)
import qualified Data.Text as T
import Data.Text.Encoding (decodeUtf8)
import Data.Time.Clock (getCurrentTime)
import Data.Traversable (forM)
import qualified Distribution.PackageDescription as C
import Distribution.System (OS (Windows),
Platform (Platform))
import Language.Haskell.TH as TH (location)
import Network.HTTP.Client.Conduit (HasHttpManager)
import Path
import Path.Extra (toFilePathNoTrailingSep)
import Path.IO
import Prelude hiding (FilePath, writeFile, any)
import Stack.Build.Cache
import Stack.Build.Haddock
import Stack.Build.Installed
import Stack.Build.Source
import Stack.Constants
import Stack.Coverage
import Stack.Fetch as Fetch
import Stack.GhcPkg
import Stack.Package
import Stack.PackageDump
import Stack.Types
import Stack.Types.Internal
import Stack.Types.StackT
import qualified System.Directory as D
import System.Environment (getExecutablePath)
import System.Exit (ExitCode (ExitSuccess))
import qualified System.FilePath as FP
import System.IO
import System.PosixCompat.Files (createLink)
import System.Process.Log (showProcessArgDebug)
import System.Process.Read
import System.Process.Run
#if !MIN_VERSION_process(1,2,1)
import System.Process.Internals (createProcess_)
#endif
type M env m = (MonadIO m,MonadReader env m,HasHttpManager env,HasBuildConfig env,MonadLogger m,MonadBaseControl IO m,MonadCatch m,MonadMask m,HasLogLevel env,HasEnvConfig env,HasTerminal env, HasConfig env)
-- | Fetch the packages necessary for a build, for example in combination with a dry run.
preFetch :: M env m => Plan -> m ()
preFetch plan
| Set.null idents = $logDebug "Nothing to fetch"
| otherwise = do
$logDebug $ T.pack $
"Prefetching: " ++
intercalate ", " (map packageIdentifierString $ Set.toList idents)
menv <- getMinimalEnvOverride
fetchPackages menv idents
where
idents = Set.unions $ map toIdent $ Map.toList $ planTasks plan
toIdent (name, task) =
case taskType task of
TTLocal _ -> Set.empty
TTUpstream package _ -> Set.singleton $ PackageIdentifier
name
(packageVersion package)
-- | Print a description of build plan for human consumption.
printPlan :: M env m
=> Plan
-> m ()
printPlan plan = do
case Map.elems $ planUnregisterLocal plan of
[] -> $logInfo "No packages would be unregistered."
xs -> do
$logInfo "Would unregister locally:"
forM_ xs $ \(ident, mreason) -> $logInfo $ T.concat
[ T.pack $ packageIdentifierString ident
, case mreason of
Nothing -> ""
Just reason -> T.concat
[ " ("
, reason
, ")"
]
]
$logInfo ""
case Map.elems $ planTasks plan of
[] -> $logInfo "Nothing to build."
xs -> do
$logInfo "Would build:"
mapM_ ($logInfo . displayTask) xs
let hasTests = not . Set.null . testComponents . taskComponents
hasBenches = not . Set.null . benchComponents . taskComponents
tests = Map.elems $ Map.filter hasTests $ planFinals plan
benches = Map.elems $ Map.filter hasBenches $ planFinals plan
unless (null tests) $ do
$logInfo ""
$logInfo "Would test:"
mapM_ ($logInfo . displayTask) tests
unless (null benches) $ do
$logInfo ""
$logInfo "Would benchmark:"
mapM_ ($logInfo . displayTask) benches
$logInfo ""
case Map.toList $ planInstallExes plan of
[] -> $logInfo "No executables to be installed."
xs -> do
$logInfo "Would install executables:"
forM_ xs $ \(name, loc) -> $logInfo $ T.concat
[ name
, " from "
, case loc of
Snap -> "snapshot"
Local -> "local"
, " database"
]
-- | For a dry run
displayTask :: Task -> Text
displayTask task = T.pack $ concat
[ packageIdentifierString $ taskProvides task
, ": database="
, case taskLocation task of
Snap -> "snapshot"
Local -> "local"
, ", source="
, case taskType task of
TTLocal lp -> concat
[ toFilePath $ lpDir lp
]
TTUpstream _ _ -> "package index"
, if Set.null missing
then ""
else ", after: " ++ intercalate "," (map packageIdentifierString $ Set.toList missing)
]
where
missing = tcoMissing $ taskConfigOpts task
data ExecuteEnv = ExecuteEnv
{ eeEnvOverride :: !EnvOverride
, eeConfigureLock :: !(MVar ())
, eeInstallLock :: !(MVar ())
, eeBuildOpts :: !BuildOpts
, eeBaseConfigOpts :: !BaseConfigOpts
, eeGhcPkgIds :: !(TVar (Map PackageIdentifier Installed))
, eeTempDir :: !(Path Abs Dir)
, eeSetupHs :: !(Path Abs File)
-- ^ Temporary Setup.hs for simple builds
, eeSetupExe :: !(Maybe (Path Abs File))
-- ^ Compiled version of eeSetupHs
, eeCabalPkgVer :: !Version
, eeTotalWanted :: !Int
, eeWanted :: !(Set PackageName)
, eeLocals :: ![LocalPackage]
, eeGlobalDB :: !(Path Abs Dir)
, eeGlobalDumpPkgs :: !(Map GhcPkgId (DumpPackage () ()))
, eeSnapshotDumpPkgs :: !(TVar (Map GhcPkgId (DumpPackage () ())))
, eeLocalDumpPkgs :: !(TVar (Map GhcPkgId (DumpPackage () ())))
}
-- | Get a compiled Setup exe
getSetupExe :: M env m
=> Path Abs File -- ^ Setup.hs input file
-> Path Abs Dir -- ^ temporary directory
-> m (Maybe (Path Abs File))
getSetupExe setupHs tmpdir = do
wc <- getWhichCompiler
econfig <- asks getEnvConfig
platformDir <- platformGhcRelDir
let config = getConfig econfig
baseNameS = concat
[ "setup-Simple-Cabal-"
, versionString $ envConfigCabalVersion econfig
, "-"
, compilerVersionString $ envConfigCompilerVersion econfig
]
exeNameS = baseNameS ++
case configPlatform config of
Platform _ Windows -> ".exe"
_ -> ""
outputNameS =
case wc of
Ghc -> exeNameS
Ghcjs -> baseNameS ++ ".jsexe"
jsExeNameS =
baseNameS ++ ".jsexe"
setupDir =
configStackRoot config </>
$(mkRelDir "setup-exe-cache") </>
platformDir
exePath <- fmap (setupDir </>) $ parseRelFile exeNameS
jsExePath <- fmap (setupDir </>) $ parseRelDir jsExeNameS
exists <- liftIO $ D.doesFileExist $ toFilePath exePath
if exists
then return $ Just exePath
else do
tmpExePath <- fmap (setupDir </>) $ parseRelFile $ "tmp-" ++ exeNameS
tmpOutputPath <- fmap (setupDir </>) $ parseRelFile $ "tmp-" ++ outputNameS
tmpJsExePath <- fmap (setupDir </>) $ parseRelDir $ "tmp-" ++ jsExeNameS
liftIO $ D.createDirectoryIfMissing True $ toFilePath setupDir
menv <- getMinimalEnvOverride
let args =
[ "-clear-package-db"
, "-global-package-db"
, "-hide-all-packages"
, "-package"
, "base"
, "-package"
, "Cabal-" ++ versionString (envConfigCabalVersion econfig)
, toFilePath setupHs
, "-o"
, toFilePath tmpOutputPath
] ++
["-build-runner" | wc == Ghcjs]
runCmd' (\cp -> cp { std_out = UseHandle stderr }) (Cmd (Just tmpdir) (compilerExeName wc) menv args) Nothing
when (wc == Ghcjs) $ renameDir tmpJsExePath jsExePath
renameFile tmpExePath exePath
return $ Just exePath
-- | Execute a callback that takes an 'ExecuteEnv'.
withExecuteEnv :: M env m
=> EnvOverride
-> BuildOpts
-> BaseConfigOpts
-> [LocalPackage]
-> [DumpPackage () ()] -- ^ global packages
-> [DumpPackage () ()] -- ^ snapshot packages
-> [DumpPackage () ()] -- ^ local packages
-> (ExecuteEnv -> m a)
-> m a
withExecuteEnv menv bopts baseConfigOpts locals globalPackages snapshotPackages localPackages inner = do
withCanonicalizedSystemTempDirectory stackProgName $ \tmpdir -> do
configLock <- newMVar ()
installLock <- newMVar ()
idMap <- liftIO $ newTVarIO Map.empty
let setupHs = tmpdir </> $(mkRelFile "Setup.hs")
liftIO $ writeFile (toFilePath setupHs) "import Distribution.Simple\nmain = defaultMain"
setupExe <- getSetupExe setupHs tmpdir
cabalPkgVer <- asks (envConfigCabalVersion . getEnvConfig)
globalDB <- getGlobalDB menv =<< getWhichCompiler
snapshotPackagesTVar <- liftIO $ newTVarIO (toDumpPackagesByGhcPkgId snapshotPackages)
localPackagesTVar <- liftIO $ newTVarIO (toDumpPackagesByGhcPkgId localPackages)
inner ExecuteEnv
{ eeEnvOverride = menv
, eeBuildOpts = bopts
-- Uncertain as to why we cannot run configures in parallel. This appears
-- to be a Cabal library bug. Original issue:
-- https://github.com/fpco/stack/issues/84. Ideally we'd be able to remove
-- this.
, eeConfigureLock = configLock
, eeInstallLock = installLock
, eeBaseConfigOpts = baseConfigOpts
, eeGhcPkgIds = idMap
, eeTempDir = tmpdir
, eeSetupHs = setupHs
, eeSetupExe = setupExe
, eeCabalPkgVer = cabalPkgVer
, eeTotalWanted = length $ filter lpWanted locals
, eeWanted = wantedLocalPackages locals
, eeLocals = locals
, eeGlobalDB = globalDB
, eeGlobalDumpPkgs = toDumpPackagesByGhcPkgId globalPackages
, eeSnapshotDumpPkgs = snapshotPackagesTVar
, eeLocalDumpPkgs = localPackagesTVar
}
where
toDumpPackagesByGhcPkgId = Map.fromList . map (\dp -> (dpGhcPkgId dp, dp))
-- | Perform the actual plan
executePlan :: M env m
=> EnvOverride
-> BuildOpts
-> BaseConfigOpts
-> [LocalPackage]
-> [DumpPackage () ()] -- ^ global packages
-> [DumpPackage () ()] -- ^ snapshot packages
-> [DumpPackage () ()] -- ^ local packages
-> InstalledMap
-> Plan
-> m ()
executePlan menv bopts baseConfigOpts locals globalPackages snapshotPackages localPackages installedMap plan = do
withExecuteEnv menv bopts baseConfigOpts locals globalPackages snapshotPackages localPackages (executePlan' installedMap plan)
unless (Map.null $ planInstallExes plan) $ do
snapBin <- (</> bindirSuffix) `liftM` installationRootDeps
localBin <- (</> bindirSuffix) `liftM` installationRootLocal
destDir <- asks $ configLocalBin . getConfig
createTree destDir
destDir' <- liftIO . D.canonicalizePath . toFilePath $ destDir
isInPATH <- liftIO . fmap (any (FP.equalFilePath destDir')) . (mapM D.canonicalizePath <=< filterM D.doesDirectoryExist) $ (envSearchPath menv)
when (not isInPATH) $
$logWarn $ T.concat
[ "Installation path "
, T.pack destDir'
, " not found in PATH environment variable"
]
platform <- asks getPlatform
let ext =
case platform of
Platform _ Windows -> ".exe"
_ -> ""
currExe <- liftIO getExecutablePath -- needed for windows, see below
installed <- forMaybeM (Map.toList $ planInstallExes plan) $ \(name, loc) -> do
let bindir =
case loc of
Snap -> snapBin
Local -> localBin
mfp <- resolveFileMaybe bindir $ T.unpack name ++ ext
case mfp of
Nothing -> do
$logWarn $ T.concat
[ "Couldn't find executable "
, name
, " in directory "
, T.pack $ toFilePath bindir
]
return Nothing
Just file -> do
let destFile = destDir' FP.</> T.unpack name ++ ext
$logInfo $ T.concat
[ "Copying from "
, T.pack $ toFilePath file
, " to "
, T.pack destFile
]
liftIO $ case platform of
Platform _ Windows | FP.equalFilePath destFile currExe ->
windowsRenameCopy (toFilePath file) destFile
_ -> D.copyFile (toFilePath file) destFile
return $ Just (destDir', [T.append name (T.pack ext)])
let destToInstalled = Map.fromListWith (++) installed
unless (Map.null destToInstalled) $ $logInfo ""
forM_ (Map.toList destToInstalled) $ \(dest, executables) -> do
$logInfo $ T.concat
[ "Copied executables to "
, T.pack dest
, ":"]
forM_ executables $ \exe -> $logInfo $ T.append "- " exe
config <- asks getConfig
menv' <- liftIO $ configEnvOverride config EnvSettings
{ esIncludeLocals = True
, esIncludeGhcPackagePath = True
, esStackExe = True
, esLocaleUtf8 = False
}
forM_ (boptsExec bopts) $ \(cmd, args) -> do
$logProcessRun cmd args
callProcess (Cmd Nothing cmd menv' args)
-- | Windows can't write over the current executable. Instead, we rename the
-- current executable to something else and then do the copy.
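--
-- A rough sketch of the resulting sequence (illustrative file names only):
--
-- > windowsRenameCopy "stack.exe.tmp" "stack.exe"
-- > -- copies "stack.exe.tmp" to "stack.exe.new", renames "stack.exe" to
-- > -- "stack.exe.old", then renames "stack.exe.new" to "stack.exe"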
windowsRenameCopy :: FilePath -> FilePath -> IO ()
windowsRenameCopy src dest = do
D.copyFile src new
D.renameFile dest old
D.renameFile new dest
where
new = dest ++ ".new"
old = dest ++ ".old"
-- | Perform the actual plan (internal)
executePlan' :: M env m
=> InstalledMap
-> Plan
-> ExecuteEnv
-> m ()
executePlan' installedMap0 plan ee@ExecuteEnv {..} = do
when (toCoverage $ boptsTestOpts eeBuildOpts) deleteHpcReports
wc <- getWhichCompiler
cv <- asks $ envConfigCompilerVersion . getEnvConfig
case Map.toList $ planUnregisterLocal plan of
[] -> return ()
ids -> do
localDB <- packageDatabaseLocal
forM_ ids $ \(id', (ident, mreason)) -> do
$logInfo $ T.concat
[ T.pack $ packageIdentifierString ident
, ": unregistering"
, case mreason of
Nothing -> ""
Just reason -> T.concat
[ " ("
, reason
, ")"
]
]
unregisterGhcPkgId eeEnvOverride wc cv localDB id' ident
liftIO $ atomically $ modifyTVar' eeLocalDumpPkgs $ \initMap ->
foldl' (flip Map.delete) initMap $ Map.keys (planUnregisterLocal plan)
-- Yes, we're explicitly discarding result values, which in general would
-- be bad. monad-unlift does this all properly at the type system level,
-- but I don't want to pull it in for this one use case, when we know that
-- stack is always using transformer stacks that are safe for this use case.
runInBase <- liftBaseWith $ \run -> return (void . run)
let actions = concatMap (toActions installedMap' runInBase ee) $ Map.elems $ Map.mergeWithKey
(\_ b f -> Just (Just b, Just f))
(fmap (\b -> (Just b, Nothing)))
(fmap (\f -> (Nothing, Just f)))
(planTasks plan)
(planFinals plan)
threads <- asks $ configJobs . getConfig
concurrentTests <- asks $ configConcurrentTests . getConfig
let keepGoing =
case boptsKeepGoing eeBuildOpts of
Just kg -> kg
Nothing -> boptsTests eeBuildOpts || boptsBenchmarks eeBuildOpts
concurrentFinal =
-- TODO it probably makes more sense to use a lock for test suites
-- and just have the execution blocked. Turning off all concurrency
-- on finals based on the --test option doesn't fit in well.
if boptsTests eeBuildOpts
then concurrentTests
else True
terminal <- asks getTerminal
errs <- liftIO $ runActions threads keepGoing concurrentFinal actions $ \doneVar -> do
let total = length actions
loop prev
| prev == total =
runInBase $ $logStickyDone ("Completed " <> T.pack (show total) <> " action(s).")
| otherwise = do
when terminal $ runInBase $
$logSticky ("Progress: " <> T.pack (show prev) <> "/" <> T.pack (show total))
done <- atomically $ do
done <- readTVar doneVar
check $ done /= prev
return done
loop done
if total > 1
then loop 0
else return ()
when (toCoverage $ boptsTestOpts eeBuildOpts) $ do
generateHpcUnifiedReport
generateHpcMarkupIndex
unless (null errs) $ throwM $ ExecutionFailure errs
when (boptsHaddock eeBuildOpts) $ do
snapshotDumpPkgs <- liftIO (readTVarIO eeSnapshotDumpPkgs)
localDumpPkgs <- liftIO (readTVarIO eeLocalDumpPkgs)
generateLocalHaddockIndex eeEnvOverride wc eeBaseConfigOpts localDumpPkgs eeLocals
generateDepsHaddockIndex eeEnvOverride wc eeBaseConfigOpts eeGlobalDumpPkgs snapshotDumpPkgs localDumpPkgs eeLocals
generateSnapHaddockIndex eeEnvOverride wc eeBaseConfigOpts eeGlobalDumpPkgs snapshotDumpPkgs
where
installedMap' = Map.difference installedMap0
$ Map.fromList
$ map (\(ident, _) -> (packageIdentifierName ident, ()))
$ Map.elems
$ planUnregisterLocal plan
toActions :: M env m
=> InstalledMap
-> (m () -> IO ())
-> ExecuteEnv
-> (Maybe Task, Maybe Task) -- build and final
-> [Action]
toActions installedMap runInBase ee (mbuild, mfinal) =
abuild ++ afinal
where
abuild =
case mbuild of
Nothing -> []
Just task@Task {..} ->
[ Action
{ actionId = ActionId taskProvides ATBuild
, actionDeps =
(Set.map (\ident -> ActionId ident ATBuild) (tcoMissing taskConfigOpts))
, actionDo = \ac -> runInBase $ singleBuild runInBase ac ee task installedMap False
}
]
afinal =
case mfinal of
Nothing -> []
Just task@Task {..} ->
(if taskAllInOne then [] else
[Action
{ actionId = ActionId taskProvides ATBuildFinal
, actionDeps = addBuild ATBuild
(Set.map (\ident -> ActionId ident ATBuild) (tcoMissing taskConfigOpts))
, actionDo = \ac -> runInBase $ singleBuild runInBase ac ee task installedMap True
}]) ++
[ Action
{ actionId = ActionId taskProvides ATFinal
, actionDeps = addBuild (if taskAllInOne then ATBuild else ATBuildFinal) Set.empty
, actionDo = \ac -> runInBase $ do
let comps = taskComponents task
tests = testComponents comps
benches = benchComponents comps
unless (Set.null tests) $ do
singleTest runInBase topts (Set.toList tests) ac ee task installedMap
unless (Set.null benches) $ do
singleBench runInBase beopts (Set.toList benches) ac ee task installedMap
}
]
where
addBuild aty =
case mbuild of
Nothing -> id
Just _ -> Set.insert $ ActionId taskProvides aty
bopts = eeBuildOpts ee
topts = boptsTestOpts bopts
beopts = boptsBenchmarkOpts bopts
-- | Generate the ConfigCache
getConfigCache :: MonadIO m
=> ExecuteEnv -> Task -> InstalledMap -> Bool -> Bool
-> m (Map PackageIdentifier GhcPkgId, ConfigCache)
getConfigCache ExecuteEnv {..} Task {..} installedMap enableTest enableBench = do
let extra =
-- We enable tests if the test suite dependencies are already
-- installed, so that we avoid unnecessary recompilation based on
-- cabal_macros.h changes when switching between 'stack build' and
-- 'stack test'. See:
-- https://github.com/commercialhaskell/stack/issues/805
case taskType of
TTLocal lp -> concat
[ ["--enable-tests" | enableTest || (depsPresent installedMap $ lpTestDeps lp)]
, ["--enable-benchmarks" | enableBench || (depsPresent installedMap $ lpBenchDeps lp)]
]
_ -> []
idMap <- liftIO $ readTVarIO eeGhcPkgIds
let getMissing ident =
case Map.lookup ident idMap of
Nothing -> error "singleBuild: invariant violated, missing package ID missing"
Just (Library ident' x) -> assert (ident == ident') $ Just (ident, x)
Just (Executable _) -> Nothing
missing' = Map.fromList $ mapMaybe getMissing $ Set.toList missing
TaskConfigOpts missing mkOpts = taskConfigOpts
opts = mkOpts missing'
allDeps = Set.fromList $ Map.elems missing' ++ Map.elems taskPresent
cache = ConfigCache
{ configCacheOpts = opts
{ coNoDirs = coNoDirs opts ++ map T.unpack extra
}
, configCacheDeps = allDeps
, configCacheComponents =
case taskType of
TTLocal lp -> Set.map renderComponent $ lpComponents lp
TTUpstream _ _ -> Set.empty
, configCacheHaddock =
shouldHaddockPackage eeBuildOpts eeWanted (packageIdentifierName taskProvides)
}
allDepsMap = Map.union missing' taskPresent
return (allDepsMap, cache)
-- | Ensure that the configuration for the package matches what is given
ensureConfig :: M env m
=> ConfigCache -- ^ newConfigCache
-> Path Abs Dir -- ^ package directory
-> ExecuteEnv
-> m () -- ^ announce
-> (Bool -> [String] -> m ()) -- ^ cabal
-> Path Abs File -- ^ .cabal file
-> m Bool
ensureConfig newConfigCache pkgDir ExecuteEnv {..} announce cabal cabalfp = do
newCabalMod <- liftIO (fmap modTime (D.getModificationTime (toFilePath cabalfp)))
needConfig <-
if boptsReconfigure eeBuildOpts
then return True
else do
-- Determine the old and new configuration in the local directory, to
-- determine if we need to reconfigure.
mOldConfigCache <- tryGetConfigCache pkgDir
mOldCabalMod <- tryGetCabalMod pkgDir
return $ fmap configCacheOpts mOldConfigCache /= Just (configCacheOpts newConfigCache)
|| mOldCabalMod /= Just newCabalMod
let ConfigureOpts dirs nodirs = configCacheOpts newConfigCache
when needConfig $ withMVar eeConfigureLock $ \_ -> do
deleteCaches pkgDir
announce
menv <- getMinimalEnvOverride
let programNames =
if eeCabalPkgVer < $(mkVersion "1.22")
then ["ghc", "ghc-pkg"]
else ["ghc", "ghc-pkg", "ghcjs", "ghcjs-pkg"]
exes <- forM programNames $ \name -> do
mpath <- findExecutable menv name
return $ case mpath of
Nothing -> []
Just x -> return $ concat ["--with-", name, "=", toFilePath x]
cabal False $ "configure" : concat
[ concat exes
, dirs
, nodirs
]
writeConfigCache pkgDir newConfigCache
writeCabalMod pkgDir newCabalMod
return needConfig
announceTask :: MonadLogger m => Task -> Text -> m ()
announceTask task x = $logInfo $ T.concat
[ T.pack $ packageIdentifierString $ taskProvides task
, ": "
, x
]
withSingleContext :: M env m
=> (m () -> IO ())
-> ActionContext
-> ExecuteEnv
-> Task
-> Maybe (Map PackageIdentifier GhcPkgId)
-- ^ All dependencies' package ids to provide to Setup.hs. If
-- Nothing, just provide global and snapshot package
-- databases.
-> Maybe String
-> ( Package
-> Path Abs File
-> Path Abs Dir
-> (Bool -> [String] -> m ())
-> (Text -> m ())
-> Bool
-> Maybe (Path Abs File, Handle)
-> m a)
-> m a
withSingleContext runInBase ActionContext {..} ExecuteEnv {..} task@Task {..} mdeps msuffix inner0 =
withPackage $ \package cabalfp pkgDir ->
withLogFile package $ \mlogFile ->
withCabal package pkgDir mlogFile $ \cabal ->
inner0 package cabalfp pkgDir cabal announce console mlogFile
where
announce = announceTask task
wanted =
case taskType of
TTLocal lp -> lpWanted lp
TTUpstream _ _ -> False
console = wanted
&& all (\(ActionId ident _) -> ident == taskProvides) (Set.toList acRemaining)
&& eeTotalWanted == 1
withPackage inner =
case taskType of
TTLocal lp -> inner (lpPackage lp) (lpCabalFile lp) (lpDir lp)
TTUpstream package _ -> do
mdist <- liftM Just distRelativeDir
m <- unpackPackageIdents eeEnvOverride eeTempDir mdist $ Set.singleton taskProvides
case Map.toList m of
[(ident, dir)]
| ident == taskProvides -> do
let name = packageIdentifierName taskProvides
cabalfpRel <- parseRelFile $ packageNameString name ++ ".cabal"
let cabalfp = dir </> cabalfpRel
inner package cabalfp dir
_ -> error $ "withPackage: invariant violated: " ++ show m
withLogFile package inner
| console = inner Nothing
| otherwise = do
logPath <- buildLogPath package msuffix
createTree (parent logPath)
let fp = toFilePath logPath
bracket
(liftIO $ openBinaryFile fp WriteMode)
(liftIO . hClose)
$ \h -> inner (Just (logPath, h))
withCabal package pkgDir mlogFile inner = do
config <- asks getConfig
let envSettings = EnvSettings
{ esIncludeLocals = taskLocation task == Local
, esIncludeGhcPackagePath = False
, esStackExe = False
, esLocaleUtf8 = True
}
menv <- liftIO $ configEnvOverride config envSettings
-- When looking for ghc to build Setup.hs we want to ignore local binaries, see:
-- https://github.com/commercialhaskell/stack/issues/1052
menvWithoutLocals <- liftIO $ configEnvOverride config envSettings { esIncludeLocals = False }
getGhcPath <- runOnce $ liftIO $ join $ findExecutable menvWithoutLocals "ghc"
getGhcjsPath <- runOnce $ liftIO $ join $ findExecutable menvWithoutLocals "ghcjs"
distRelativeDir' <- distRelativeDir
esetupexehs <-
-- Avoid broken Setup.hs files causing problems for simple build
-- types, see:
-- https://github.com/commercialhaskell/stack/issues/370
case (packageSimpleType package, eeSetupExe) of
(True, Just setupExe) -> return $ Left setupExe
_ -> liftIO $ fmap Right $ getSetupHs pkgDir
inner $ \stripTHLoading args -> do
let cabalPackageArg =
"-package=" ++ packageIdentifierString
(PackageIdentifier cabalPackageName
eeCabalPkgVer)
packageArgs =
case mdeps of
-- This branch is taken when
-- 'explicit-setup-deps' is requested in your
-- stack.yaml file.
Just deps | explicitSetupDeps (packageName package) config ->
-- Stack always builds with the global Cabal for various
-- reproducibility issues.
let depsMinusCabal
= map ghcPkgIdString
$ Set.toList
$ addGlobalPackages deps (Map.elems eeGlobalDumpPkgs)
in
( "-clear-package-db"
: "-global-package-db"
: map (("-package-db=" ++) . toFilePathNoTrailingSep) (bcoExtraDBs eeBaseConfigOpts)
) ++
( ("-package-db=" ++ toFilePathNoTrailingSep (bcoSnapDB eeBaseConfigOpts))
: ("-package-db=" ++ toFilePathNoTrailingSep (bcoLocalDB eeBaseConfigOpts))
: "-hide-all-packages"
: cabalPackageArg
: map ("-package-id=" ++) depsMinusCabal
)
-- This branch is usually taken for builds, and
-- is always taken for `stack sdist`.
--
-- This approach is debatable. It adds access to the
-- snapshot package database for Cabal. There are two
-- possible objections:
--
-- 1. This doesn't isolate the build enough; arbitrary
-- other packages available could cause the build to
-- succeed or fail.
--
-- 2. This doesn't provide enough packages: we should also
-- include the local database when building local packages.
--
-- Currently, this branch is only taken via `stack
-- sdist` or when explicitly requested in the
-- stack.yaml file.
_ ->
cabalPackageArg
: "-clear-package-db"
: "-global-package-db"
: map (("-package-db=" ++) . toFilePathNoTrailingSep) (bcoExtraDBs eeBaseConfigOpts)
++ ["-package-db=" ++ toFilePathNoTrailingSep (bcoSnapDB eeBaseConfigOpts)]
setupArgs = ("--builddir=" ++ toFilePathNoTrailingSep distRelativeDir') : args
runExe exeName fullArgs = do
$logProcessRun (toFilePath exeName) fullArgs
-- Use createProcess_ to avoid the log file being closed afterwards
(Nothing, moutH, merrH, ph) <- liftIO $ createProcess_ "singleBuild" cp
let makeAbsolute = stripTHLoading -- If users want control, we should add a config option for this
ec <-
liftIO $
withAsync (runInBase $ maybePrintBuildOutput stripTHLoading makeAbsolute pkgDir LevelInfo mlogFile moutH) $ \outThreadID ->
withAsync (runInBase $ maybePrintBuildOutput False makeAbsolute pkgDir LevelWarn mlogFile merrH) $ \errThreadID -> do
ec <- waitForProcess ph
wait errThreadID
wait outThreadID
return ec
case ec of
ExitSuccess -> return ()
_ -> do
bss <-
case mlogFile of
Nothing -> return []
Just (logFile, h) -> do
liftIO $ hClose h
runResourceT
$ CB.sourceFile (toFilePath logFile)
=$= CT.decodeUtf8
$$ mungeBuildOutput stripTHLoading makeAbsolute pkgDir
=$ CL.consume
throwM $ CabalExitedUnsuccessfully
ec
taskProvides
exeName
fullArgs
(fmap fst mlogFile)
bss
where
cp0 = proc (toFilePath exeName) fullArgs
cp = cp0
{ cwd = Just $ toFilePath pkgDir
, Process.env = envHelper menv
-- Ideally we'd create a new pipe here and then close it
-- below to avoid the child process from taking from our
-- stdin. However, if we do this, the child process won't
-- be able to get the codepage on Windows that we want.
-- See:
-- https://github.com/commercialhaskell/stack/issues/738
-- , std_in = CreatePipe
, std_out =
case mlogFile of
Nothing -> CreatePipe
Just (_, h) -> UseHandle h
, std_err =
case mlogFile of
Nothing -> CreatePipe
Just (_, h) -> UseHandle h
}
wc <- getWhichCompiler
(exeName, fullArgs) <- case (esetupexehs, wc) of
(Left setupExe, _) -> return (setupExe, setupArgs)
(Right setuphs, compiler) -> do
distDir <- distDirFromDir pkgDir
let setupDir = distDir </> $(mkRelDir "setup")
outputFile = setupDir </> $(mkRelFile "setup")
createTree setupDir
compilerPath <-
case compiler of
Ghc -> getGhcPath
Ghcjs -> getGhcjsPath
runExe compilerPath $
[ "--make"
, "-odir", toFilePathNoTrailingSep setupDir
, "-hidir", toFilePathNoTrailingSep setupDir
, "-i", "-i."
] ++ packageArgs ++
[ toFilePath setuphs
, "-o", toFilePath outputFile
] ++
(case compiler of
Ghc -> []
Ghcjs -> ["-build-runner"])
return (outputFile, setupArgs)
runExe exeName $ (if boptsCabalVerbose eeBuildOpts then ("--verbose":) else id) fullArgs
maybePrintBuildOutput stripTHLoading makeAbsolute pkgDir level mlogFile mh =
case mh of
Just h ->
case mlogFile of
Just{} -> return ()
Nothing -> printBuildOutput stripTHLoading makeAbsolute pkgDir level h
Nothing -> return ()
singleBuild :: M env m
=> (m () -> IO ())
-> ActionContext
-> ExecuteEnv
-> Task
-> InstalledMap
-> Bool -- ^ Is this a final build?
-> m ()
singleBuild runInBase ac@ActionContext {..} ee@ExecuteEnv {..} task@Task {..} installedMap isFinalBuild = do
(allDepsMap, cache) <- getConfigCache ee task installedMap enableTests enableBenchmarks
mprecompiled <- getPrecompiled cache
minstalled <-
case mprecompiled of
Just precompiled -> copyPreCompiled precompiled
Nothing -> realConfigAndBuild cache allDepsMap
case minstalled of
Nothing -> return ()
Just installed -> do
writeFlagCache installed cache
liftIO $ atomically $ modifyTVar eeGhcPkgIds $ Map.insert taskProvides installed
where
pname = packageIdentifierName taskProvides
shouldHaddockPackage' = shouldHaddockPackage eeBuildOpts eeWanted pname
doHaddock package = shouldHaddockPackage' &&
not isFinalBuild &&
-- Works around haddock failing on bytestring-builder since it has no modules
-- when bytestring is new enough.
packageHasExposedModules package
buildingFinals = isFinalBuild || taskAllInOne
enableTests = buildingFinals && any isCTest (taskComponents task)
enableBenchmarks = buildingFinals && any isCBench (taskComponents task)
annSuffix = if result == "" then "" else " (" <> result <> ")"
where
result = T.intercalate " + " $ concat $
[ ["lib" | taskAllInOne && hasLib]
, ["exe" | taskAllInOne && hasExe]
, ["test" | enableTests]
, ["bench" | enableBenchmarks]
]
(hasLib, hasExe) = case taskType of
TTLocal lp -> (packageHasLibrary (lpPackage lp), not (Set.null (exesToBuild lp)))
-- This isn't true, but we don't want to have this info for
-- upstream deps.
TTUpstream{} -> (False, False)
getPrecompiled cache =
case taskLocation task of
Snap | not shouldHaddockPackage' -> do
mpc <- readPrecompiledCache taskProvides
(configCacheOpts cache)
(configCacheDeps cache)
case mpc of
Nothing -> return Nothing
Just pc | maybe False
(bcoSnapInstallRoot eeBaseConfigOpts `isParentOf`)
(parseAbsFile =<< (pcLibrary pc)) ->
-- If old precompiled cache files are left around but snapshots are deleted,
-- it is possible for the precompiled file to refer to the very library
-- we're building, and if flags are changed it may try to copy the library
-- to itself. This check prevents that from happening.
return Nothing
Just pc | otherwise -> do
let allM _ [] = return True
allM f (x:xs) = do
b <- f x
if b then allM f xs else return False
b <- liftIO $ allM D.doesFileExist $ maybe id (:) (pcLibrary pc) $ pcExes pc
return $ if b then Just pc else Nothing
_ -> return Nothing
copyPreCompiled (PrecompiledCache mlib exes) = do
announceTask task "copying precompiled package"
forM_ mlib $ \libpath -> do
menv <- getMinimalEnvOverride
withMVar eeInstallLock $ \() -> do
-- We want to ignore the global and user databases.
-- Unfortunately, ghc-pkg doesn't take such arguments on the
-- command line. Instead, we'll set GHC_PACKAGE_PATH. See:
-- https://github.com/commercialhaskell/stack/issues/1146
menv' <- modifyEnvOverride menv
$ Map.insert
"GHC_PACKAGE_PATH"
(T.pack $ toFilePathNoTrailingSep $ bcoSnapDB eeBaseConfigOpts)
-- In case a build of the library with different flags already exists, unregister it
-- before copying.
catch
(readProcessNull Nothing menv' "ghc-pkg"
[ "unregister"
, "--force"
, packageIdentifierString taskProvides
])
(\(ReadProcessException _ _ _ _) -> return ())
readProcessNull Nothing menv' "ghc-pkg"
[ "register"
, "--force"
, libpath
]
liftIO $ forM_ exes $ \exe -> do
D.createDirectoryIfMissing True bindir
let dst = bindir FP.</> FP.takeFileName exe
createLink exe dst `catchIO` \_ -> D.copyFile exe dst
case (mlib, exes) of
(Nothing, _:_) -> markExeInstalled (taskLocation task) taskProvides
_ -> return ()
-- Find the package in the database
wc <- getWhichCompiler
let pkgDbs = [bcoSnapDB eeBaseConfigOpts]
case mlib of
Nothing -> return $ Just $ Executable taskProvides
Just _ -> do
mpkgid <- loadInstalledPkg eeEnvOverride wc pkgDbs eeSnapshotDumpPkgs pname
return $ Just $
case mpkgid of
Nothing -> assert False $ Executable taskProvides
Just pkgid -> Library taskProvides pkgid
where
bindir = toFilePath $ bcoSnapInstallRoot eeBaseConfigOpts </> bindirSuffix
realConfigAndBuild cache allDepsMap = withSingleContext runInBase ac ee task (Just allDepsMap) Nothing
$ \package cabalfp pkgDir cabal announce _console _mlogFile -> do
_neededConfig <- ensureConfig cache pkgDir ee (announce ("configure" <> annSuffix)) cabal cabalfp
if boptsOnlyConfigure eeBuildOpts
then return Nothing
else liftM Just $ realBuild cache package pkgDir cabal announce
realBuild cache package pkgDir cabal announce = do
wc <- getWhichCompiler
markExeNotInstalled (taskLocation task) taskProvides
case taskType of
TTLocal lp -> do
when enableTests $ unsetTestSuccess pkgDir
writeBuildCache pkgDir $ lpNewBuildCache lp
TTUpstream _ _ -> return ()
() <- announce ("build" <> annSuffix)
config <- asks getConfig
extraOpts <- extraBuildOptions eeBuildOpts
preBuildTime <- modTime <$> liftIO getCurrentTime
cabal (configHideTHLoading config) $ ("build" :) $ (++ extraOpts) $
case (taskType, taskAllInOne, isFinalBuild) of
(_, True, True) -> fail "Invariant violated: cannot have an all-in-one build that also has a final build step."
(TTLocal lp, False, False) -> primaryComponentOptions lp
(TTLocal lp, False, True) -> finalComponentOptions lp
(TTLocal lp, True, False) -> primaryComponentOptions lp ++ finalComponentOptions lp
(TTUpstream{}, _, _) -> []
checkForUnlistedFiles taskType preBuildTime pkgDir
when (doHaddock package) $ do
announce "haddock"
sourceFlag <- do
hyped <- tryProcessStdout Nothing eeEnvOverride "haddock" ["--hyperlinked-source"]
case hyped of
-- Fancy crosslinked source
Right _ -> do
return ["--haddock-option=--hyperlinked-source"]
-- Older hscolour colouring
Left _ -> do
hscolourExists <- doesExecutableExist eeEnvOverride "HsColour"
unless hscolourExists $ $logWarn
("Warning: haddock not generating hyperlinked sources because 'HsColour' not\n" <>
"found on PATH (use 'stack install hscolour' to install).")
return ["--hyperlink-source" | hscolourExists]
cabal False (concat [["haddock", "--html", "--hoogle", "--html-location=../$pkg-$version/"]
,sourceFlag])
unless isFinalBuild $ withMVar eeInstallLock $ \() -> do
announce "copy/register"
cabal False ["copy"]
when (packageHasLibrary package) $ cabal False ["register"]
let (installedPkgDb, installedDumpPkgsTVar) =
case taskLocation task of
Snap ->
( bcoSnapDB eeBaseConfigOpts
, eeSnapshotDumpPkgs )
Local ->
( bcoLocalDB eeBaseConfigOpts
, eeLocalDumpPkgs )
let ident = PackageIdentifier (packageName package) (packageVersion package)
mpkgid <- if packageHasLibrary package
then do
mpkgid <- loadInstalledPkg eeEnvOverride wc [installedPkgDb] installedDumpPkgsTVar (packageName package)
case mpkgid of
Nothing -> throwM $ Couldn'tFindPkgId $ packageName package
Just pkgid -> return $ Library ident pkgid
else do
markExeInstalled (taskLocation task) taskProvides -- TODO unify somehow with writeFlagCache?
return $ Executable ident
case taskLocation task of
Snap -> writePrecompiledCache eeBaseConfigOpts taskProvides
(configCacheOpts cache)
(configCacheDeps cache)
mpkgid (packageExes package)
Local -> return ()
return mpkgid
loadInstalledPkg menv wc pkgDbs tvar name = do
dps <- ghcPkgDescribe name menv wc pkgDbs $ conduitDumpPackage =$ CL.consume
case dps of
[] -> return Nothing
[dp] -> do
liftIO $ atomically $ modifyTVar' tvar (Map.insert (dpGhcPkgId dp) dp)
return $ Just (dpGhcPkgId dp)
_ -> error "singleBuild: invariant violated: multiple results when describing installed package"
-- | Check if any unlisted files have been found, and add them to the build cache.
checkForUnlistedFiles :: M env m => TaskType -> ModTime -> Path Abs Dir -> m ()
checkForUnlistedFiles (TTLocal lp) preBuildTime pkgDir = do
(addBuildCache,warnings) <-
addUnlistedToBuildCache
preBuildTime
(lpPackage lp)
(lpCabalFile lp)
(lpNewBuildCache lp)
mapM_ ($logWarn . ("Warning: " <>) . T.pack . show) warnings
unless (null addBuildCache) $
writeBuildCache pkgDir $
Map.unions (lpNewBuildCache lp : addBuildCache)
checkForUnlistedFiles (TTUpstream _ _) _ _ = return ()
-- | Determine if all of the dependencies given are installed
depsPresent :: InstalledMap -> Map PackageName VersionRange -> Bool
depsPresent installedMap deps = all
(\(name, range) ->
case Map.lookup name installedMap of
Just (_, installed) -> installedVersion installed `withinRange` range
Nothing -> False)
(Map.toList deps)
singleTest :: M env m
=> (m () -> IO ())
-> TestOpts
-> [Text]
-> ActionContext
-> ExecuteEnv
-> Task
-> InstalledMap
-> m ()
singleTest runInBase topts testsToRun ac ee task installedMap = do
-- FIXME: Since this doesn't use cabal, we should be able to avoid using a
-- full-blown 'withSingleContext'.
(allDepsMap, _cache) <- getConfigCache ee task installedMap True False
withSingleContext runInBase ac ee task (Just allDepsMap) (Just "test") $ \package _cabalfp pkgDir _cabal announce _console mlogFile -> do
config <- asks getConfig
let needHpc = toCoverage topts
toRun <-
if toDisableRun topts
then do
announce "Test running disabled by --no-run-tests flag."
return False
else if toRerunTests topts
then return True
else do
success <- checkTestSuccess pkgDir
if success
then do
unless (null testsToRun) $ announce "skipping already passed test"
return False
else return True
when toRun $ do
buildDir <- distDirFromDir pkgDir
hpcDir <- hpcDirFromDir pkgDir
when needHpc (createTree hpcDir)
errs <- liftM Map.unions $ forM (Map.toList (packageTests package)) $ \(testName, suiteInterface) -> do
let stestName = T.unpack testName
(testName', isTestTypeLib) <-
case suiteInterface of
C.TestSuiteLibV09{} -> return (stestName ++ "Stub", True)
C.TestSuiteExeV10{} -> return (stestName, False)
interface -> throwM (TestSuiteTypeUnsupported interface)
exeName <- testExeName testName'
tixPath <- liftM (pkgDir </>) $ parseRelFile $ exeName ++ ".tix"
exePath <- liftM (buildDir </>) $ parseRelFile $ "build/" ++ testName' ++ "/" ++ exeName
exists <- fileExists exePath
menv <- liftIO $ configEnvOverride config EnvSettings
{ esIncludeLocals = taskLocation task == Local
, esIncludeGhcPackagePath = True
, esStackExe = True
, esLocaleUtf8 = False
}
if exists
then do
-- We clear out the .tix files before doing a run.
when needHpc $ do
tixexists <- fileExists tixPath
when tixexists $
$logWarn ("Removing HPC file " <> T.pack (toFilePath tixPath))
removeFileIfExists tixPath
let args = toAdditionalArgs topts
argsDisplay = case args of
[] -> ""
_ -> ", args: " <> T.intercalate " " (map showProcessArgDebug args)
announce $ "test (suite: " <> testName <> argsDisplay <> ")"
-- Clear "Progress: ..." message before
-- redirecting output.
when (isNothing mlogFile) $ do
$logStickyDone ""
liftIO $ hFlush stdout
liftIO $ hFlush stderr
let output =
case mlogFile of
Nothing -> Inherit
Just (_, h) -> UseHandle h
cp = (proc (toFilePath exePath) args)
{ cwd = Just $ toFilePath pkgDir
, Process.env = envHelper menv
, std_in = CreatePipe
, std_out = output
, std_err = output
}
-- Use createProcess_ to avoid the log file being closed afterwards
(Just inH, Nothing, Nothing, ph) <- liftIO $ createProcess_ "singleBuild.runTests" cp
when isTestTypeLib $ do
logPath <- buildLogPath package (Just stestName)
createTree (parent logPath)
liftIO $ hPutStr inH $ show (logPath, testName)
liftIO $ hClose inH
ec <- liftIO $ waitForProcess ph
-- Add a trailing newline, in case the test
-- output didn't finish with a newline.
when (isNothing mlogFile) ($logInfo "")
-- Move the .tix file out of the package
-- directory into the hpc work dir, for
-- tidiness.
when needHpc $
updateTixFile (packageName package) tixPath
return $ case ec of
ExitSuccess -> Map.empty
_ -> Map.singleton testName $ Just ec
else do
$logError $ T.concat
[ "Test suite "
, testName
, " executable not found for "
, packageNameText $ packageName package
]
return $ Map.singleton testName Nothing
when needHpc $ do
let testsToRun' = map f testsToRun
f tName =
case Map.lookup tName (packageTests package) of
Just C.TestSuiteLibV09{} -> tName <> "Stub"
_ -> tName
generateHpcReport pkgDir package testsToRun'
bs <- liftIO $
case mlogFile of
Nothing -> return ""
Just (logFile, h) -> do
hClose h
S.readFile $ toFilePath logFile
unless (Map.null errs) $ throwM $ TestSuiteFailure
(taskProvides task)
errs
(fmap fst mlogFile)
bs
singleBench :: M env m
=> (m () -> IO ())
-> BenchmarkOpts
-> [Text]
-> ActionContext
-> ExecuteEnv
-> Task
-> InstalledMap
-> m ()
singleBench runInBase beopts benchesToRun ac ee task installedMap = do
-- FIXME: Since this doesn't use cabal, we should be able to avoid using a
-- full-blown 'withSingleContext'.
(allDepsMap, _cache) <- getConfigCache ee task installedMap False True
withSingleContext runInBase ac ee task (Just allDepsMap) (Just "bench") $ \_package _cabalfp _pkgDir cabal announce _console _mlogFile -> do
let args = map T.unpack benchesToRun <> maybe []
((:[]) . ("--benchmark-options=" <>))
(beoAdditionalArgs beopts)
toRun <-
if beoDisableRun beopts
then do
announce "Benchmark running disabled by --no-run-benchmarks flag."
return False
else do
return True
when toRun $ do
announce "benchmarks"
cabal False ("bench" : args)
-- | Grab all output from the given @Handle@ and log it, stripping
-- Template Haskell "Loading package" lines and making paths absolute.
-- Intended to be run in a separate thread.
printBuildOutput :: (MonadIO m, MonadBaseControl IO m, MonadLogger m,
MonadThrow m)
=> Bool -- ^ exclude TH loading?
-> Bool -- ^ convert paths to absolute?
-> Path Abs Dir -- ^ package's root directory
-> LogLevel
-> Handle
-> m ()
printBuildOutput excludeTHLoading makeAbsolute pkgDir level outH = void $
CB.sourceHandle outH
$$ CT.decodeUtf8
=$ mungeBuildOutput excludeTHLoading makeAbsolute pkgDir
=$ CL.mapM_ (monadLoggerLog $(TH.location >>= liftLoc) "" level)
-- | Strip Template Haskell "Loading package" lines and make paths absolute.
mungeBuildOutput :: MonadIO m
=> Bool -- ^ exclude TH loading?
-> Bool -- ^ convert paths to absolute?
-> Path Abs Dir -- ^ package's root directory
-> ConduitM Text Text m ()
mungeBuildOutput excludeTHLoading makeAbsolute pkgDir = void $
CT.lines
=$ CL.map stripCarriageReturn
=$ CL.filter (not . isTHLoading)
=$ CL.mapM toAbsolutePath
where
-- | Is this line a Template Haskell "Loading package" line?
isTHLoading :: Text -> Bool
isTHLoading _ | not excludeTHLoading = False
isTHLoading bs =
"Loading package " `T.isPrefixOf` bs &&
("done." `T.isSuffixOf` bs || "done.\r" `T.isSuffixOf` bs)
-- | Convert GHC error lines with file paths to have absolute file paths
toAbsolutePath bs | not makeAbsolute = return bs
toAbsolutePath bs = do
let (x, y) = T.break (== ':') bs
mabs <-
if isValidSuffix y
then do
efp <- liftIO $ tryIO $ resolveFile pkgDir (T.unpack x)
case efp of
Left _ -> return Nothing
Right fp -> return $ Just $ T.pack (toFilePath fp)
else return Nothing
case mabs of
Nothing -> return bs
Just fp -> return $ fp `T.append` y
-- | Match the line:column format at the end of lines
isValidSuffix = isRight . parseOnly (lineCol <* endOfInput)
lineCol = char ':' >> (decimal :: Parser Int)
>> char ':' >> (decimal :: Parser Int)
>> (string ": Warning:" <|> string ":") -- longer match first; (<|>) commits once the left branch succeeds
>> return ()
-- | Strip @\r@ characters from the text. Needed because Windows uses CRLF line endings.
stripCarriageReturn :: Text -> Text
stripCarriageReturn = T.filter (/= '\r')
-- | Find the Setup.hs or Setup.lhs in the given directory. If none exists,
-- throw an exception.
getSetupHs :: Path Abs Dir -- ^ project directory
-> IO (Path Abs File)
getSetupHs dir = do
exists1 <- fileExists fp1
if exists1
then return fp1
else do
exists2 <- fileExists fp2
if exists2
then return fp2
else throwM $ NoSetupHsFound dir
where
fp1 = dir </> $(mkRelFile "Setup.hs")
fp2 = dir </> $(mkRelFile "Setup.lhs")
-- Do not pass `-hpcdir` as a GHC option if coverage is not enabled.
-- This helps when running stack-compiled programs with dynamic interpreters like `hint`.
-- Cf. https://github.com/commercialhaskell/stack/issues/997
extraBuildOptions :: M env m => BuildOpts -> m [String]
extraBuildOptions bopts = do
let ddumpOpts = " -ddump-hi -ddump-to-file"
case toCoverage (boptsTestOpts bopts) of
True -> do
hpcIndexDir <- toFilePathNoTrailingSep <$> hpcRelativeDir
return ["--ghc-options", "-hpcdir " ++ hpcIndexDir ++ ddumpOpts]
False -> return ["--ghc-options", ddumpOpts]
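-- For example (added note): with coverage enabled this yields something like
-- ["--ghc-options", "-hpcdir <relative hpc dir> -ddump-hi -ddump-to-file"],
-- and without coverage just ["--ghc-options", " -ddump-hi -ddump-to-file"].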
-- Library and executable build components.
primaryComponentOptions :: LocalPackage -> [String]
primaryComponentOptions lp = concat
[ ["lib:" ++ packageNameString (packageName (lpPackage lp))
-- TODO: get this information from target parsing instead,
-- which will allow users to turn off library building if
-- desired
| packageHasLibrary (lpPackage lp)]
, map (T.unpack . T.append "exe:") $ Set.toList $ exesToBuild lp
]
exesToBuild :: LocalPackage -> Set Text
exesToBuild lp = packageExes (lpPackage lp)
-- NOTE: Ideally we'd do something like the following code, allowing
-- the user to control which executables get built. However, due to
-- https://github.com/haskell/cabal/issues/2780 we must build all
-- exes...
--
-- if lpWanted lp
-- then exeComponents (lpComponents lp)
-- -- Build all executables in the event that no
-- -- specific list is provided (as happens with
-- -- extra-deps).
-- else packageExes (lpPackage lp)
-- Test-suite and benchmark build components.
finalComponentOptions :: LocalPackage -> [String]
finalComponentOptions lp =
map (T.unpack . decodeUtf8 . renderComponent) $
Set.toList $
Set.filter (\c -> isCTest c || isCBench c) (lpComponents lp)
taskComponents :: Task -> Set NamedComponent
taskComponents task =
case taskType task of
TTLocal lp -> lpComponents lp
TTUpstream{} -> Set.empty
-- | Take the given list of package dependencies and the contents of the global
-- package database, and construct a set of installed package IDs that:
--
-- * Excludes the Cabal library (it's added later)
--
-- * Includes all packages depended on by this package
--
-- * Includes all global packages, unless: (1) it's hidden, (2) it's shadowed
-- by a depended-on package, or (3) one of its dependencies is not met.
--
-- See:
--
-- * https://github.com/commercialhaskell/stack/issues/941
--
-- * https://github.com/commercialhaskell/stack/issues/944
--
-- * https://github.com/commercialhaskell/stack/issues/949
addGlobalPackages :: Map PackageIdentifier GhcPkgId -- ^ dependencies of the package
-> [DumpPackage () ()] -- ^ global packages
-> Set GhcPkgId
addGlobalPackages deps globals0 =
res
where
-- Initial set of packages: the installed IDs of all dependencies
res0 = Map.elems $ Map.filterWithKey (\ident _ -> not $ isCabal ident) deps
-- First check on globals: it's not shadowed by a dep, it's not Cabal, and
-- it's exposed
goodGlobal1 dp = not (isDep dp)
&& not (isCabal $ dpPackageIdent dp)
&& dpIsExposed dp
globals1 = filter goodGlobal1 globals0
-- Create a Map of unique package names in the global database
globals2 = Map.fromListWith chooseBest
$ map (packageIdentifierName . dpPackageIdent &&& id) globals1
-- Final result: add in globals that have their dependencies met
res = loop id (Map.elems globals2) $ Set.fromList res0
----------------------------------
-- Some auxiliary helper functions
----------------------------------
-- Is the given package identifier for any version of Cabal
isCabal (PackageIdentifier name _) = name == $(mkPackageName "Cabal")
-- Is the given package name provided by the package dependencies?
isDep dp = packageIdentifierName (dpPackageIdent dp) `Set.member` depNames
depNames = Set.map packageIdentifierName $ Map.keysSet deps
-- Choose the best of two competing global packages (the newest version)
chooseBest dp1 dp2
| getVer dp1 < getVer dp2 = dp2
| otherwise = dp1
where
getVer = packageIdentifierVersion . dpPackageIdent
-- Are all dependencies of the given package met by the given Set of
-- installed packages
depsMet dp gids = all (`Set.member` gids) (dpDepends dp)
-- Find all globals that have all of their dependencies met
loop front (dp:dps) gids
-- This package has its deps met. Add it to the list of dependencies
-- and then traverse the list from the beginning (this package may have
-- been a dependency of an earlier one).
| depsMet dp gids = loop id (front dps) (Set.insert (dpGhcPkgId dp) gids)
-- Deps are not met, keep going
| otherwise = loop (front . (dp:)) dps gids
-- None of the packages we checked can be added, therefore drop them all
-- and return our results
loop _ [] gids = gids
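-- Illustrative walk-through of the rules above (added note; package names are
-- hypothetical): suppose the dependency map contains text-1.2 and the global
-- database contains base (exposed), bytestring-0.10 (exposed, depends only on
-- base) and text-1.1 (exposed). text-1.1 is dropped because its name is
-- shadowed by the depended-on text-1.2; base is added because it has no
-- dependencies, and bytestring-0.10 is kept because its only dependency, base,
-- is already in the set by the time it is examined.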
|
rubik/stack
|
src/Stack/Build/Execute.hs
|
Haskell
|
bsd-3-clause
| 66,832
|
{-# LANGUAGE NoMonomorphismRestriction #-}
module Numeric.Units.Dimensional.Test where
import Numeric.Units.Dimensional.Prelude
import qualified Prelude
import Test.HUnit
testPower = TestLabel "Power test" $ TestList
[ TestCase $ (9 *~ one) @=? (3 *~ one) ^ pos2
, TestCase $ (1 *~ one) @=? (12.1231 *~ one) ^ zero
, TestCase $ (0.25 *~ one) @=? (2 *~ one) ^ neg2
]
testDimensionless = TestLabel "Dimensionless test" $ TestList
[ TestCase $ (3 Prelude.** 2) *~ one @=? (3 *~ one) ** (2 *~ one)
]
testShow = TestLabel "Test 'Show' instance" $ TestList
[ TestCase $ show (1 *~ one) @?= "1"
, TestCase $ show (2 *~ meter) @?= "2 m"
, TestCase $ show (2.0 *~ (meter / second)) @?= "2.0 m s^-1"
, TestCase $ show (2.0 *~ (meter ^ pos2 / second ^ pos2)) @?= "2.0 m^2 s^-2"
, TestCase $ show (undefined :: DVelocity) @?= "m s^-1"
]
-- Collect the test cases.
tests = TestList
[ testPower
, testDimensionless
, testShow
]
main = runTestTT tests
|
bjornbm/dimensional-classic
|
Numeric/Units/Dimensional/Test.hs
|
Haskell
|
bsd-3-clause
| 1,009
|
import Data.Attoparsec.ByteString.Char8
import qualified Data.ByteString.Lazy as BL
import Options.Applicative (execParser)
import System.IO (hPutStrLn, stdout, stderr)
import CSV
import CommandLineArgs
import File
import Parse.Log
import Types
chooseParsingFunction :: CommandLineOpts -> (Parser LogEntry)
chooseParsingFunction args = case parseAsCommon args of
True -> parseAsCommonLogLine
False -> parseAsExtendedLogLine
getLog :: CommandLineOpts -> IO Log
getLog args = do
logFile <- readLog (logPath args)
let parserChoice = chooseParsingFunction args
return $ parseFileLines parserChoice logFile
main :: IO ()
main = do
args <- execParser opts
logData <- getLog args
let csv = toCSV logData
BL.hPut stdout csv
hPutStrLn stderr "Conversion to CSV completed."
return ()
|
safarijv/ncsa-logparse
|
src/Main.hs
|
Haskell
|
bsd-3-clause
| 793
|
module Lib
( someFunc
) where
someFunc :: IO ()
someFunc = putStrLn "someFunc"
|
erantapaa/parse-complex
|
src/Lib.hs
|
Haskell
|
bsd-3-clause
| 89
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="sl-SI">
<title>Server-Sent Events | ZAP Extension</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Index</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Search</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset>
|
0xkasun/security-tools
|
src/org/zaproxy/zap/extension/sse/resources/help_sl_SI/helpset_sl_SI.hs
|
Haskell
|
apache-2.0
| 980
|
{-
Some tests to verify that serialisation works as expected
-}
module AllTests(tests)
where
import GHC.Packing
import qualified Data.Array.IArray as A
import Control.Concurrent
import System.Environment
import System.IO
import System.Directory
import qualified Data.ByteString as B
import Control.Exception
import Data.Typeable
import Distribution.TestSuite
-- this test uses the trySerialize routine. We expect to trigger some
-- exceptions and catch them as appropriate.
catchPackExc :: IO () -> IO ()
catchPackExc io = io `catch` (\e -> putStrLn (show (e::PackException)))
-- need a time-wasting function which allocates...
nfib :: Integer -> Integer
nfib 0 = 1
nfib 1 = 1
nfib n = let n1 = nfib (n-1)
n2 = nfib (n-2)
in 1 + 2*n1 + n2 - n1
-- make a test instance. Action should check result and return Bool
runIt :: String -> IO Bool -> TestInstance
runIt name action
= TestInstance
{ run = action >>= return . Finished .
(\b -> if b then Pass
else Fail "unexpected output (see log)")
, name = "Test case " ++ name
, tags = []
, options = []
, setOption = \_ _ -> Right (runIt name action)
}
tests :: IO [ Test ]
tests = do putStrLn "Running all tests"
mapM (return . Test . uncurry runIt) mytests
-- all configured tests, see below
mytests = [eval_array, pack_array, pack_ThreadId, pack_MVar ]
-- test data
arr, output :: A.Array Int Int
arr = A.array (0,127) [ (i,i) | i <- [0..127] ]
output = A.amap (2*) arr
n :: Int
n = 3
eval_array :: (String, IO Bool)
eval_array = ("eval. array",
do let out = show $ take n $ A.elems output
putStrLn $ "Evaluated: " ++ out
return (out == "[0,2,4]")
)
pack_array :: (String, IO Bool)
pack_array = ("duplicating an array of 128 elements",
do packet1 <- trySerialize output
putStrLn (take (3*80) (show packet1) ++ "...")
putStrLn "now unpacking (deserialize):"
copy <- deserialize packet1
putStrLn ("unpacked, now evaluate")
putStrLn (show copy)
return $ copy == A.amap (2*) arr
)
expectException :: Typeable a => PackException -> IO (Serialized a) -> IO Bool
expectException exception action
= do putStrLn ("expect exception " ++ show exception)
action >>= print
return False
`catch` \e -> do putStrLn ("Got: " ++ show e)
return (e == exception)
pack_ThreadId :: (String, IO Bool)
pack_ThreadId = ("packing a thread ID (unsupported)",
do t <- myThreadId
expectException P_UNSUPPORTED $ trySerialize t
)
pack_MVar :: (String, IO Bool)
pack_MVar = ("packing an MVar (should be cannotpack)",
do m <- newEmptyMVar :: IO (MVar Integer)
expectException P_CANNOTPACK $ trySerialize m
)
|
ajnsit/packman
|
Test/AllTests.hs
|
Haskell
|
bsd-3-clause
| 3,030
|
--------------------------------------------------------------------------------
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
module Hakyll.Check
( Check (..)
, check
) where
--------------------------------------------------------------------------------
import Control.Applicative ((<$>))
import Control.Monad (forM_)
import Control.Monad.Reader (ask)
import Control.Monad.RWS (RWST, runRWST)
import Control.Monad.Trans (liftIO)
import Control.Monad.Writer (tell)
import Data.List (isPrefixOf)
import Data.Monoid (Monoid (..))
import Data.Set (Set)
import qualified Data.Set as S
import Network.URI (unEscapeString)
import System.Directory (doesDirectoryExist, doesFileExist)
import System.Exit (ExitCode (..))
import System.FilePath (takeDirectory, takeExtension, (</>))
import qualified Text.HTML.TagSoup as TS
--------------------------------------------------------------------------------
#ifdef CHECK_EXTERNAL
import Control.Exception (AsyncException (..),
SomeException (..), handle, throw)
import Control.Monad.State (get, modify)
import Data.List (intercalate)
import Data.Typeable (cast)
import Data.Version (versionBranch)
import GHC.Exts (fromString)
import qualified Network.HTTP.Conduit as Http
import qualified Network.HTTP.Types as Http
import qualified Paths_hakyll as Paths_hakyll
#endif
--------------------------------------------------------------------------------
import Hakyll.Core.Configuration
import Hakyll.Core.Logger (Logger)
import qualified Hakyll.Core.Logger as Logger
import Hakyll.Core.Util.File
import Hakyll.Web.Html
--------------------------------------------------------------------------------
data Check = All | InternalLinks
deriving (Eq, Ord, Show)
--------------------------------------------------------------------------------
check :: Configuration -> Logger -> Check -> IO ExitCode
check config logger check' = do
((), write) <- runChecker checkDestination config logger check'
return $ if checkerFaulty write > 0 then ExitFailure 1 else ExitSuccess
--------------------------------------------------------------------------------
data CheckerRead = CheckerRead
{ checkerConfig :: Configuration
, checkerLogger :: Logger
, checkerCheck :: Check
}
--------------------------------------------------------------------------------
data CheckerWrite = CheckerWrite
{ checkerFaulty :: Int
, checkerOk :: Int
} deriving (Show)
--------------------------------------------------------------------------------
instance Monoid CheckerWrite where
mempty = CheckerWrite 0 0
mappend (CheckerWrite f1 o1) (CheckerWrite f2 o2) =
CheckerWrite (f1 + f2) (o1 + o2)
--------------------------------------------------------------------------------
type CheckerState = Set String
--------------------------------------------------------------------------------
type Checker a = RWST CheckerRead CheckerWrite CheckerState IO a
--------------------------------------------------------------------------------
runChecker :: Checker a -> Configuration -> Logger -> Check
-> IO (a, CheckerWrite)
runChecker checker config logger check' = do
let read' = CheckerRead
{ checkerConfig = config
, checkerLogger = logger
, checkerCheck = check'
}
(x, _, write) <- runRWST checker read' S.empty
Logger.flush logger
return (x, write)
--------------------------------------------------------------------------------
checkDestination :: Checker ()
checkDestination = do
config <- checkerConfig <$> ask
files <- liftIO $ getRecursiveContents
(const $ return False) (destinationDirectory config)
let htmls =
[ destinationDirectory config </> file
| file <- files
, takeExtension file == ".html"
]
forM_ htmls checkFile
--------------------------------------------------------------------------------
checkFile :: FilePath -> Checker ()
checkFile filePath = do
logger <- checkerLogger <$> ask
contents <- liftIO $ readFile filePath
Logger.header logger $ "Checking file " ++ filePath
let urls = getUrls $ TS.parseTags contents
forM_ urls $ \url -> do
Logger.debug logger $ "Checking link " ++ url
checkUrl filePath url
--------------------------------------------------------------------------------
checkUrl :: FilePath -> String -> Checker ()
checkUrl filePath url
| isExternal url = checkExternalUrl url
| hasProtocol url = skip "Unknown protocol, skipping"
| otherwise = checkInternalUrl filePath url
where
validProtoChars = ['A'..'Z'] ++ ['a'..'z'] ++ ['0'..'9'] ++ "+-."
hasProtocol str = case break (== ':') str of
(proto, ':' : _) -> all (`elem` validProtoChars) proto
_ -> False
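-- Illustrative behaviour (added note): with the dispatch above, a link such as
-- "mailto:user@example.org" has an unrecognised protocol and is skipped,
-- "/about.html" is resolved on disk by 'checkInternalUrl', and a link that
-- 'isExternal' recognises (for example "https://example.org/") goes through
-- 'checkExternalUrl'.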
--------------------------------------------------------------------------------
ok :: String -> Checker ()
ok _ = tell $ mempty {checkerOk = 1}
--------------------------------------------------------------------------------
skip :: String -> Checker ()
skip reason = do
logger <- checkerLogger <$> ask
Logger.debug logger $ reason
tell $ mempty {checkerOk = 1}
--------------------------------------------------------------------------------
faulty :: String -> Checker ()
faulty url = do
logger <- checkerLogger <$> ask
Logger.error logger $ "Broken link to " ++ show url
tell $ mempty {checkerFaulty = 1}
--------------------------------------------------------------------------------
checkInternalUrl :: FilePath -> String -> Checker ()
checkInternalUrl base url = case url' of
"" -> ok url
_ -> do
config <- checkerConfig <$> ask
let dest = destinationDirectory config
dir = takeDirectory base
filePath
| "/" `isPrefixOf` url' = dest ++ url'
| otherwise = dir </> url'
exists <- checkFileExists filePath
if exists then ok url else faulty url
where
url' = stripFragments $ unEscapeString url
--------------------------------------------------------------------------------
checkExternalUrl :: String -> Checker ()
#ifdef CHECK_EXTERNAL
checkExternalUrl url = do
logger <- checkerLogger <$> ask
needsCheck <- (== All) . checkerCheck <$> ask
checked <- (url `S.member`) <$> get
if not needsCheck || checked
then Logger.debug logger "Already checked, skipping"
else do
isOk <- liftIO $ handle (failure logger) $
Http.withManager $ \mgr -> do
request <- Http.parseUrl urlToCheck
response <- Http.http (settings request) mgr
let code = Http.statusCode (Http.responseStatus response)
return $ code >= 200 && code < 300
modify $ if schemeRelative url
then S.insert urlToCheck . S.insert url
else S.insert url
if isOk then ok url else faulty url
where
-- Add additional request info
settings r = r
{ Http.method = "HEAD"
, Http.redirectCount = 10
, Http.requestHeaders = ("User-Agent", ua) : Http.requestHeaders r
}
-- Nice user agent info
ua = fromString $ "hakyll-check/" ++
(intercalate "." $ map show $ versionBranch $ Paths_hakyll.version)
-- Catch all the things except UserInterrupt
failure logger (SomeException e) = case cast e of
Just UserInterrupt -> throw UserInterrupt
_ -> Logger.error logger (show e) >> return False
-- Check scheme-relative links
schemeRelative = isPrefixOf "//"
urlToCheck = if schemeRelative url then "http:" ++ url else url
#else
checkExternalUrl _ = return ()
#endif
--------------------------------------------------------------------------------
-- | Wraps doesFileExist, also checks for index.html
checkFileExists :: FilePath -> Checker Bool
checkFileExists filePath = liftIO $ do
file <- doesFileExist filePath
dir <- doesDirectoryExist filePath
case (file, dir) of
(True, _) -> return True
(_, True) -> doesFileExist $ filePath </> "index.html"
_ -> return False
--------------------------------------------------------------------------------
stripFragments :: String -> String
stripFragments = takeWhile (not . flip elem ['?', '#'])
|
Minoru/hakyll
|
src/Hakyll/Check.hs
|
Haskell
|
bsd-3-clause
| 9,141
|
module PackageTests.ReexportedModules.Check where
import Data.Version
import PackageTests.PackageTester
import System.FilePath
import Test.Tasty.HUnit
import Data.Maybe
import Data.List
import Control.Monad
import Data.Char
import Text.ParserCombinators.ReadP
orFail :: String -> [(a, String)] -> a
orFail err r = case find (all isSpace . snd) r of
Nothing -> error err
Just (i, _) -> i
find' :: (a -> Bool) -> [a] -> Maybe a
find' = find
suite :: FilePath -> Assertion
suite ghcPath = do
-- ToDo: Turn this into a utility function
(_, _, xs) <- run Nothing ghcPath [] ["--info"]
let compat = (>= Version [7,9] [])
. orFail "could not parse version"
. readP_to_S parseVersion
. snd
. fromJust
. find' ((=="Project version").fst)
. orFail "could not parse ghc --info output"
. reads
$ xs
when compat $ do
let spec = PackageSpec
{ directory = "PackageTests" </> "ReexportedModules"
, configOpts = []
, distPref = Nothing
}
result <- cabal_build spec ghcPath
assertBuildSucceeded result
|
corngood/cabal
|
Cabal/tests/PackageTests/ReexportedModules/Check.hs
|
Haskell
|
bsd-3-clause
| 1,220
|
{-# LANGUAGE ScopedTypeVariables, JavaScriptFFI, ForeignFunctionInterface #-}
module Main where
import Prelude hiding (print, putStrLn)
import Control.Concurrent
import Control.Concurrent.MVar
import Control.Exception
import Control.Monad
import Data.List (intersperse)
import GHCJS.Types
import GHCJS.Concurrent
import GHCJS.Foreign.Callback
import qualified Data.JSString as JSS
---------------------------------------------------------------------------
-- our usual standard io implementation is asynchronous, use this more
-- primitive mechanism to print the results
print :: Show a => a -> IO ()
print = putStrLn . show
putStrLn :: String -> IO ()
putStrLn = js_log . JSS.pack
foreign import javascript unsafe
"console.log($1);"
js_log :: JSString -> IO ()
---------------------------------------------------------------------------
foreign import javascript unsafe
"$1();"
js_runCallback :: Callback a -> IO ()
---------------------------------------------------------------------------
main :: IO ()
main = sequence_ .
intersperse (putStrLn "---------------------------") $
[ synchronously1
, wouldBlock1
, wouldBlock2
, callback1
, callback2
, callback3
]
printIsSync :: IO ()
printIsSync = do
tid <- myThreadId
sync <- isThreadSynchronous tid
ca <- isThreadContinueAsync tid
print (sync, ca)
printMVar :: MVar () -> IO ()
printMVar = print <=< takeMVar
synchronously1 :: IO ()
synchronously1 = do
putStrLn "synchronously1"
printIsSync
synchronously printIsSync
printIsSync
let h x m = m `catch` \(e::SomeException) -> do
putStrLn ("handler: " ++ x)
printIsSync
h "outside" (synchronously $ printIsSync >> error "err")
printIsSync
synchronously (h "inside" (printIsSync >> error "err"))
printIsSync
putStrLn "synchronously1 done"
-- blocking on MVar should give us an exception, the exception can be handled
wouldBlock1 :: IO ()
wouldBlock1 = do
putStrLn "wouldBlock1"
x `catch` \(e::SomeException) ->
do putStrLn "exception caught: outer"
print e
putStrLn "wouldBlock1 done"
where
x = do
mv1 <- newEmptyMVar
printIsSync
synchronously $ do
printIsSync
printMVar mv1 `catch` \(e::SomeException) -> do
putStrLn "exeption caught inner1"
print e
printMVar mv1 `catch` \(e::WouldBlockException) -> do
putStrLn "exeption caught inner2"
print e
putStrLn "ok"
printMVar mv1
putStrLn "unreachable"
printIsSync
-- threadDelay should give us the exception too
wouldBlock2 :: IO ()
wouldBlock2 = do
putStrLn "wouldBlock2"
threadDelay 500000
let x = synchronously $ do
threadDelay 500000 `catch` \(e::WouldBlockException) ->
putStrLn "exception caught: inner"
printIsSync
putStrLn "ok"
threadDelay 500000
putStrLn "unreachable"
x `catch` \(e::WouldBlockException) ->
putStrLn "exception caught: outer"
putStrLn "wouldBlock2 done"
-- synchronous callbacks give us an exception
callback1 :: IO ()
callback1 = do
putStrLn "callback1"
mv1 <- newEmptyMVar
cb1 <- syncCallback ThrowWouldBlock $ do
printIsSync
putStrLn "ok"
printMVar mv1 `catch` \(e::WouldBlockException) ->
putStrLn "exception: would block"
printIsSync
putStrLn "ok"
printMVar mv1
-- thread would block error ends up on stderr
putStrLn "unreachable"
js_runCallback cb1
putStrLn "callback1 finished"
releaseCallback cb1
callback2 :: IO ()
callback2 = do
putStrLn "callback2"
mv1 <- newEmptyMVar
mv2 <- newEmptyMVar
cb1 <- syncCallback ContinueAsync $ do
printIsSync
putStrLn "ok"
printMVar mv1
putStrLn "callback"
printIsSync
putMVar mv2 ()
js_runCallback cb1
putStrLn "main"
putMVar mv1 ()
printMVar mv2
putStrLn "main"
putStrLn "callback2 done"
-- async callbacks are async
callback3 :: IO ()
callback3 = do
putStrLn "callback3"
mv1 <- newEmptyMVar
cb1 <- asyncCallback $ do
putStrLn "async callback"
printIsSync
putMVar mv1 ()
printMVar mv1
putStrLn "callback"
putMVar mv1 ()
js_runCallback cb1
printMVar mv1
putStrLn "main"
putMVar mv1 ()
printMVar mv1
putStrLn "main"
printIsSync
releaseCallback cb1
putStrLn "callback3 done"
|
seereason/ghcjs
|
test/conc/syncThreads.hs
|
Haskell
|
mit
| 4,442
|
{-# LANGUAGE MultiParamTypeClasses, FunctionalDependencies, FlexibleContexts, GADTs #-}
module FDsFromGivens2 where
class C a b | a -> b where
cop :: a -> b -> ()
data KCC where
KCC :: C Char Char => () -> KCC
f :: C Char [a] => a -> a
f = undefined
bar :: KCC -> a -> a
bar (KCC _) = f
|
ezyang/ghc
|
testsuite/tests/typecheck/should_fail/FDsFromGivens2.hs
|
Haskell
|
bsd-3-clause
| 297
|
module Bowling (score, BowlingError(..)) where
data BowlingError = IncompleteGame
| InvalidRoll { rollIndex :: Int, rollValue :: Int }
deriving (Eq, Show)
score :: [Int] -> Either BowlingError Int
score rolls = error "You need to implement this function."
|
exercism/xhaskell
|
exercises/practice/bowling/src/Bowling.hs
|
Haskell
|
mit
| 279
|
{-# LANGUAGE Safe #-}
{-# LANGUAGE DoAndIfThenElse #-}
module Control.Concurrent.Singular.Event.Primitive (
Event (), newEvent, always, sync
) where
import Control.Concurrent.Singular.Event.Status
import Data.List.Util
import Control.Concurrent.MVar (newEmptyMVar, putMVar, takeMVar)
import Control.Monad (unless)
import Data.Monoid (Monoid, mempty, mappend)
data BaseEvent a = BaseEvent {
commit :: !(IO (Maybe a)),
block :: StatusRef -> (a -> IO ()) -> IO ()
}
instance Functor BaseEvent where
fmap f (BaseEvent commit' block') = BaseEvent {
commit = fmap (fmap f) commit',
block = \status handler -> block' status (handler . f)
}
newtype Event a = Event { unEvent :: [BaseEvent a] }
newEvent :: IO (Maybe a) -> (StatusRef -> (a -> IO ()) -> IO ()) -> Event a
newEvent commit' block' = Event [BaseEvent commit' block']
always :: a -> Event a
always x = newEvent (return $ Just x) (\ _ _ -> return ())
instance Functor Event where
fmap f = Event . map (fmap f) . unEvent
instance Monoid (Event a) where
mempty = Event []
mappend (Event x) (Event y) = Event (x ++ y)
sync :: Event a -> IO a
sync (Event bases) = shuffle bases >>= commit'
where
commit' [] = block'
commit' (x:xs) = commit x >>= maybe (commit' xs) return
block' = do
status <- newStatusRef
output <- newEmptyMVar
let block'' [] = return ()
block'' (x:xs) = do
block x status $ putMVar output
status' <- readStatusRef status
unless (status' == Synced) $
block'' xs
shuffle bases >>= block''
takeMVar output
|
YellPika/Hannel
|
src/Control/Concurrent/Singular/Event/Primitive.hs
|
Haskell
|
mit
| 1,667
|
{-# LANGUAGE TypeOperators #-}
import Numeric.Noise.Perlin
import System.Random (randomIO,randomRs,newStdGen,mkStdGen)
import Graphics.Gloss
import Graphics.Gloss.Data.Color (makeColor8)
import qualified Data.Array.Repa as R
type Seed = Int
rangeMap :: (Ord a) => b -> [(a,b)] -> a -> b
rangeMap def rs x = case dropWhile (\(a,_) -> x > a) rs of
(_,b):_ -> b
_______ -> def
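-- Illustrative examples (added note): with the thresholds used for the height
-- map below,
--   rangeMap 255 [(-0.9, 0), (0.25, 130)] (-1.0) == 0     -- below the first threshold
--   rangeMap 255 [(-0.9, 0), (0.25, 130)] 0.0    == 130
--   rangeMap 255 [(-0.9, 0), (0.25, 130)] 0.5    == 255   -- past every threshold: default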
main = do
heightSeed <- randomIO :: IO Int
treeSeed <- randomIO :: IO Int
let heightP = perlin heightSeed 16 (1/128) (1/2)
treeP = perlin treeSeed 16 (1/128) (1/2)
w = 1024 :: Int
h = 1024 :: Int
shape = (R.Z R.:. w R.:. h)
heightArray <- R.computeP $ R.fromFunction shape
(\(R.Z R.:.x R.:. y) -> ( fromIntegral x
, fromIntegral y
, rangeMap 255 [(-0.9,0),(0.25,130)] $
noiseValue heightP (fromIntegral x, fromIntegral y, 0)
)
) :: IO (R.Array R.U R.DIM2 (Float,Float,Int))
let heightPic = R.map (\(x,y,z) -> scale 1 1
$ translate (x - fromIntegral w / 2) (y - fromIntegral h / 2)
$ color (makeColor8 z z z 255)
$ rectangleSolid 1 1
) heightArray
let trees = randomPerlin heightSeed treeSeed (0,3) (w,h)
treePic = shapeMap (\x y z -> let fx = fromIntegral x
fy = fromIntegral y in
scale 1 1
$ translate (fx - fromIntegral w / 2) (fy - fromIntegral h / 2)
$ (if z>3.25 then color (makeColor8 0 255 0 255) else color (makeColor8 0 0 0 255))
$ rectangleSolid 1 1
) trees
display (FullScreen (1600, 1200)) black $ pictures $ R.toList heightPic
-- display (InWindow "Perlin Test" (1600, 1200) (0, 0)) black $ pictures $ R.toList treePic
shapeMap f array = R.fromFunction (R.extent array) (\sh@(R.Z R.:.x R.:. y) -> f x y $ array R.! sh)
randomPerlin :: Seed -- Perlin Seed
-> Seed -- Random Seed
-> (Double,Double) -- Random Range
-> (Int,Int) -- Matrix Width & Height
-> R.Array R.U R.DIM2 Double
randomPerlin pSeed rSeed range (w,h) = R.fromListUnboxed shape zips
where
perl = perlin pSeed 16 (1/128) (1/2)
shape = R.ix2 w h
rnds = randomRs range $ mkStdGen rSeed
zips = zipWith (\(x,y) rnd -> rnd + noiseValue perl (fromIntegral x, fromIntegral y, 0))
[(x,y) | x <- [0..w-1], y <- [0..h-1]]
rnds
|
RTS2013/RTS
|
tests/mapGeneration_test/PerlinNoise/HaskellPerlin.hs
|
Haskell
|
mit
| 2,801
|
module Data.BigBunnyAndDeer.DeerInfo
( findDeerEntry
, updateDeerInfo
, writeDeerInfo
, parseRawDeerInfo
, fetchDeerInfo
) where
import Data.Maybe
import Control.Arrow
import Control.Monad.IO.Class
import System.Directory
import qualified System.IO.Strict as SIO
import qualified Data.IntMap as IM
import Data.Default
import Data.Coerce
import Data.BigBunnyAndDeer.Type
deerEntryToPair :: DeerEntry -> (Int, Maybe Integer)
deerEntryToPair (DeerEntry a b) = (a,b)
updateDeerInfo :: DeerId -> Integer -> DeerInfo -> DeerInfo
updateDeerInfo did newTS = coerce $ IM.alter (Just . alterEntry) did
where
alterEntry :: Maybe DeerEntry -> DeerEntry
alterEntry old = case old of
Nothing -> DeerEntry 1 (Just newTS)
Just (DeerEntry tt _) -> DeerEntry (succ tt) (Just newTS)
fetchDeerInfo :: FilePath -> IO DeerInfo
fetchDeerInfo fp = do
b <- liftIO $ doesFileExist fp
if b
then parseRawDeerInfo <$> getRawDeerInfo fp
else return (coerce (def :: IM.IntMap DeerEntry))
getRawDeerInfo :: FilePath -> IO String
getRawDeerInfo = SIO.readFile
parseLine :: String -> (Int, (Int, Maybe Integer))
parseLine = read
dumpDeerInfo :: DeerInfo -> String
dumpDeerInfo =
coerce >>>
IM.toList >>>
map (second deerEntryToPair >>> show) >>>
unlines
parseRawDeerInfo :: String -> DeerInfo
parseRawDeerInfo =
lines >>>
map (parseLine >>> second (uncurry DeerEntry)) >>>
IM.fromList >>> DeerInfo
findDeerEntry :: DeerInfo -> DeerId -> DeerEntry
findDeerEntry di did = fromMaybe def (IM.lookup did (coerce di))
writeDeerInfo :: FilePath -> DeerInfo -> IO ()
writeDeerInfo fp di = writeFile fp (dumpDeerInfo di)
|
Javran/BigBunnyAndDeer
|
src/Data/BigBunnyAndDeer/DeerInfo.hs
|
Haskell
|
mit
| 1,674
|
{-# LANGUAGE OverloadedStrings #-}
module System.Directory.Watchman.Expression
( Expression
, renderExpression
, true
, false
, System.Directory.Watchman.Expression.all
, System.Directory.Watchman.Expression.any
, (.&&)
, (.||)
, dirname
, dirname'
, empty
, exists
, match
, match'
, name
, name'
, System.Directory.Watchman.Expression.not
, size
, suffix
, type_
, caseSensitive
, caseInsensitive
, basename
, wholename
, depth
, includeDotFiles
, noEscape
) where
import Data.Foldable (foldl')
import Data.ByteString (ByteString)
import Data.Int
import qualified Data.Map.Strict as M
import qualified Data.Sequence as Seq
import System.Directory.Watchman.FileType
import System.Directory.Watchman.WFilePath
import System.Directory.Watchman.BSER
data CaseSensitivity
= CaseSensitive
| CaseInsensitive
deriving (Show, Eq, Ord)
class HasCaseSensitivityOption a where
setCaseSensitivity :: CaseSensitivity -> a -> a
data PathScope
= BaseName
| WholeName
deriving (Show, Eq, Ord)
class HasPathScopeOption a where
setPathScope :: PathScope -> a -> a
data Expression
= EAllOf ![Expression]
| EAnyOf ![Expression]
| EDirName !WFilePath !DirNameParams
| ETrue
| EFalse
| EEmpty
| EExists
| EMatch !ByteString !MatchParams
| EName ![WFilePath] !NameParams
| ESince () -- TODO !!!
| ENot !Expression
| ESize !(Comparison Int64)
| ESuffix !ByteString
| EType !FileType
deriving (Show, Eq, Ord)
data DirNameParams = DirNameParams
{ _DirNameParams_Depth :: !(Comparison Int)
, _DirNameParams_CaseSensitivity :: !CaseSensitivity
}
deriving (Show, Eq, Ord)
defaultDirNameParams :: DirNameParams
defaultDirNameParams = DirNameParams
{ _DirNameParams_Depth = Ge 0
, _DirNameParams_CaseSensitivity = CaseSensitive
}
instance HasCaseSensitivityOption DirNameParams where
setCaseSensitivity c x = x { _DirNameParams_CaseSensitivity = c }
data MatchParams = MatchParams
{ _MatchParams_CaseSensitivity :: !CaseSensitivity
, _MatchParams_PathScope :: !PathScope
, _MatchParams_IncludeDotFiles :: !Bool
, _MatchParams_NoEscape :: !Bool
}
deriving (Show, Eq, Ord)
defaultMatchParams :: MatchParams
defaultMatchParams = MatchParams
{ _MatchParams_CaseSensitivity = CaseSensitive
, _MatchParams_PathScope = BaseName
, _MatchParams_IncludeDotFiles = False
, _MatchParams_NoEscape = False
}
instance HasCaseSensitivityOption MatchParams where
setCaseSensitivity c x = x { _MatchParams_CaseSensitivity = c }
instance HasPathScopeOption MatchParams where
setPathScope c x = x { _MatchParams_PathScope = c }
data NameParams = NameParams
{ _NameParams_CaseSensitivity :: !CaseSensitivity
, _NameParams_PathScope :: !PathScope
}
deriving (Show, Eq, Ord)
defaultNameParams :: NameParams
defaultNameParams = NameParams
{ _NameParams_CaseSensitivity = CaseSensitive
, _NameParams_PathScope = BaseName
}
instance HasCaseSensitivityOption NameParams where
setCaseSensitivity c x = x { _NameParams_CaseSensitivity = c }
instance HasPathScopeOption NameParams where
setPathScope c x = x { _NameParams_PathScope = c }
true :: Expression
true = ETrue
false :: Expression
false = EFalse
all :: [Expression] -> Expression
all = EAllOf
any :: [Expression] -> Expression
any = EAnyOf
infixr 3 .&&
(.&&) :: Expression -> Expression -> Expression
lhs .&& rhs = EAllOf [lhs, rhs]
infixr 2 .||
(.||) :: Expression -> Expression -> Expression
lhs .|| rhs = EAnyOf [lhs, rhs]
dirname :: WFilePath -> Expression
dirname path = EDirName path defaultDirNameParams
dirname' :: WFilePath -> [DirNameParams -> DirNameParams] -> Expression
dirname' path modifiers = EDirName path (applyModifiers defaultDirNameParams modifiers)
empty :: Expression
empty = EEmpty
exists :: Expression
exists = EExists
match :: ByteString -> Expression
match pattern = EMatch pattern defaultMatchParams
match' :: ByteString -> [MatchParams -> MatchParams] -> Expression
match' pattern modifiers = EMatch pattern (applyModifiers defaultMatchParams modifiers)
name :: [WFilePath] -> Expression
name files = EName files defaultNameParams
name' :: [WFilePath] -> [NameParams -> NameParams] -> Expression
name' files modifiers = EName files (applyModifiers defaultNameParams modifiers)
not :: Expression -> Expression
not = ENot
size :: Comparison Int64 -> Expression
size = ESize
suffix :: ByteString -> Expression
suffix = ESuffix
type_ :: FileType -> Expression
type_ = EType
applyModifiers :: a -> [a -> a] -> a
applyModifiers def modifiers = foldl' (\x f -> f x) def modifiers
caseSensitive :: HasCaseSensitivityOption a => a -> a
caseSensitive = setCaseSensitivity CaseSensitive
caseInsensitive :: HasCaseSensitivityOption a => a -> a
caseInsensitive = setCaseSensitivity CaseInsensitive
basename :: HasPathScopeOption a => a -> a
basename = setPathScope BaseName
wholename :: HasPathScopeOption a => a -> a
wholename = setPathScope WholeName
depth :: Comparison Int -> DirNameParams -> DirNameParams
depth c x = x { _DirNameParams_Depth = c }
includeDotFiles :: MatchParams -> MatchParams
includeDotFiles x = x { _MatchParams_IncludeDotFiles = True }
noEscape :: MatchParams -> MatchParams
noEscape x = x { _MatchParams_NoEscape = True }
data Comparison a
= Eq !a -- ^ Equal
| Ne !a -- ^ Not Equal
| Gt !a -- ^ Greater Than
| Ge !a -- ^ Greater Than or Equal
| Lt !a -- ^ Less Than
| Le !a -- ^ Less Than or Equal
deriving (Show, Eq, Ord)
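-- Illustrative example (added; 'exampleQuery' is not part of the original
-- module): a query for Haskell sources larger than 1 KiB, matched
-- case-insensitively against the whole path.
exampleQuery :: Expression
exampleQuery =
    match' "*.hs" [caseInsensitive, wholename] .&& size (Gt 1024)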
renderPathScope :: PathScope -> BSERValue
renderPathScope BaseName = BSERString "basename"
renderPathScope WholeName = BSERString "wholename"
renderOperator :: Comparison a -> BSERValue
renderOperator (Eq _) = BSERString "eq"
renderOperator (Ne _) = BSERString "ne"
renderOperator (Gt _) = BSERString "gt"
renderOperator (Ge _) = BSERString "ge"
renderOperator (Lt _) = BSERString "lt"
renderOperator (Le _) = BSERString "le"
comparisonValue :: Integral n => Comparison n -> BSERValue
comparisonValue (Eq v) = compactBSERInt v
comparisonValue (Ne v) = compactBSERInt v
comparisonValue (Gt v) = compactBSERInt v
comparisonValue (Ge v) = compactBSERInt v
comparisonValue (Lt v) = compactBSERInt v
comparisonValue (Le v) = compactBSERInt v
renderExpression :: Expression -> BSERValue
renderExpression (EAllOf exprs) =
BSERArray (BSERString "allof" Seq.<| Seq.fromList (map renderExpression exprs))
renderExpression (EAnyOf exprs) =
BSERArray (BSERString "anyof" Seq.<| Seq.fromList (map renderExpression exprs))
renderExpression (EDirName (WFilePath p) (DirNameParams d caseSensitivity)) =
BSERArray (Seq.fromList [BSERString exprName, BSERString p, BSERArray (Seq.fromList [BSERString "depth", renderOperator d, comparisonValue d])])
where
exprName = case caseSensitivity of { CaseSensitive -> "dirname"; CaseInsensitive -> "idirname" }
renderExpression ETrue = BSERString "true"
renderExpression EFalse = BSERString "false"
renderExpression EEmpty = BSERString "empty"
renderExpression EExists = BSERString "exists"
renderExpression (EMatch pattern (MatchParams caseSensitivity pathScope includeDotFiles_ noEscape_)) =
BSERArray (Seq.fromList [BSERString exprName, BSERString pattern, renderPathScope pathScope] Seq.>< flags)
where
exprName = case caseSensitivity of { CaseSensitive -> "match"; CaseInsensitive -> "imatch" }
flagsMap = M.unions
[ if includeDotFiles_ then M.singleton "includedotfiles" (BSERBool True) else M.empty
, if noEscape_ then M.singleton "noescape" (BSERBool True) else M.empty
]
flags = if M.null flagsMap then Seq.empty else Seq.singleton (BSERObject flagsMap)
renderExpression (EName files (NameParams caseSensitivity pathScope)) =
BSERArray (Seq.fromList [BSERString exprName, BSERArray (Seq.fromList (map (BSERString . toByteString) files)), renderPathScope pathScope])
where
exprName = case caseSensitivity of { CaseSensitive -> "name"; CaseInsensitive -> "iname" }
renderExpression (ESince _) = error "TODO 928352935423"
renderExpression (ENot expr) =
BSERArray (Seq.fromList [BSERString "not", renderExpression expr])
renderExpression (ESize s) =
BSERArray (Seq.fromList [BSERString "size", renderOperator s, comparisonValue s])
renderExpression (ESuffix s) =
BSERArray (Seq.fromList [BSERString "suffix", BSERString s])
renderExpression (EType t) =
BSERArray (Seq.fromList [BSERString "type", BSERString (fileTypeChar t)])
|
bitc/hs-watchman
|
src/System/Directory/Watchman/Expression.hs
|
Haskell
|
mit
| 8,617
|
module Problem7 ( flattenList, NestedList(..) ) where
data NestedList a = Elem a | List [NestedList a]
flattenList :: NestedList a -> [a]
flattenList (Elem a) = [a]
flattenList (List a) = concatMap flattenList a
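-- Illustrative example (added, not part of the original solution):
exampleFlattened :: [Int]
exampleFlattened = flattenList (List [Elem 1, List [Elem 2, Elem 3], Elem 4])
-- evaluates to [1,2,3,4]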
|
chanind/haskell-99-problems
|
Problem7.hs
|
Haskell
|
mit
| 213
|
module Sudoku.Strategy.HiddenSingle where
import Prelude
import Sudoku
import Sudoku.Strategy
import Data.List
concatRowCandidates :: Sudoku -> Int -> String
concatRowCandidates su i = concat [ findCandidates su i j | j<-[0..(columnCount su - 1)] ]
concatColumnCandidates :: Sudoku -> Int -> String
concatColumnCandidates su j = concat [ findCandidates su i j | i<-[0..(rowCount su - 1)] ]
concatBlockCandidates :: Sudoku -> Int -> Int -> String
concatBlockCandidates su i j
= concat [ findCandidates su k l | k<-[i'..i''], l<-[j'..j''] ]
where
h = blockHeight su
w = blockWidth su
i' = (div i h) * h
j' = (div j w) * w
i'' = i' + h - 1
j'' = j' + w - 1
findUniqueRowCandidates :: Sudoku -> Int -> Int -> String
findUniqueRowCandidates su i _ = concat $ filter (\x -> length x == 1) $ group $ sort $ concatRowCandidates su i
findUniqueColumnCandidates :: Sudoku -> Int -> Int -> String
findUniqueColumnCandidates su _ j = concat $ filter (\x -> length x == 1) $ group $ sort $ concatColumnCandidates su j
findUniqueBlockCandidates :: Sudoku -> Int -> Int -> String
findUniqueBlockCandidates su i j = concat $ filter (\x -> length x == 1) $ group $ sort $ concatBlockCandidates su i j
resolveCandidates :: Sudoku -> Int -> Int -> (Char, String)
resolveCandidates su i j | not $ null urci = (s, urci)
| not $ null ucci = (s, ucci)
| not $ null ubci = (s, ubci)
| otherwise = (s, cs)
where
s = su !! i !! j
cs = findCandidates su i j
urci = intersect cs (findUniqueRowCandidates su i j)
ucci = intersect cs (findUniqueColumnCandidates su i j)
ubci = intersect cs (findUniqueBlockCandidates su i j)
resolveAllCandidates :: Sudoku -> [[(Char, String)]]
resolveAllCandidates su = mapWithIndeces su (\i j -> resolveCandidates su i j)
solve :: Sudoku -> Sudoku
solve su = run su resolveAllCandidates
|
thomasbrus/sudoku-solver
|
src/Sudoku/Strategy/HiddenSingle.hs
|
Haskell
|
mit
| 2,092
|
{-# LANGUAGE OverloadedStrings #-}
module TypePlay.Check.SimplyTyped where
import Prelude hiding (map,foldl,elem)
import Data.List (map,foldl,(\\),union,elem)
import Data.Text (Text)
import qualified Data.Text as T
import Control.Monad (when)
import Control.Monad.Except (throwError)
import Data.Monoid ((<>))
type Sym = Text
data Expr
= Var Sym
| App Expr Expr
| Lam Sym Type Expr
deriving (Eq, Read, Show)
-- Simple Typing (t -> t B)
data Type
= Base
| Arrow Type Type
deriving (Eq, Read, Show)
-- x e e \x:t.e
-- Type checker will take an expression and return the type.
-- The TC will also need the types of all free vars in the expr.
-- Representing the environment is a list of vars and their respective types.
newtype Env = Env [(Sym, Type)] deriving (Show)
initialEnv :: Env
initialEnv = Env []
extend :: Sym -> Type -> Env -> Env
extend s t (Env r) = Env ((s,t) : r)
-- Type checking will be written using a monadic style
-- so we can have some error handling funnies.
type ErrorMsg = Text
type TC a = Either ErrorMsg a
findVar :: Env -> Sym -> TC Type
findVar (Env r) s =
case lookup s r of
Just t -> return t
Nothing -> throwError $ "Cannot find variable " <> s
tCheck :: Env -> Expr -> TC Type
tCheck r (Var s) =
findVar r s
tCheck r (App f a) = do
tf <- tCheck r f
case tf of
Arrow at rt -> do
ta <- tCheck r a
when (ta /= at) $ throwError "Bad function argument type"
return rt
_ -> throwError "Non-function in application"
tCheck r (Lam s t e) = do
let r' = extend s t r
te <- tCheck r' e
return $ Arrow t te
typeCheck :: Expr -> Type
typeCheck e =
case tCheck initialEnv e of
Left msg -> error ("Type error:\n" <> T.unpack msg)
Right t -> t
-- (\x:B. \y:B. x)(\z:B. z)
-- App (Lam "x" Base $ Lam "y" Base $ Var "x") (Lam "z" Base $ Var "z")
-- In beta reduction, on the lambda form is a redex.
-- (\x.e)a reduces to e[x:=a]
-- This means that all (free) occurrences of 'x' in 'e' become 'a'
-- alpha-substitution, which is simply renaming a bound variable.
-- \x.x can be changed to \y.y
-- Start with normal order to WHNF. Weak Head Normal Form.
-- In WHNF - ensure there is no redex along the "spine" of the expr.
-- Starting from the root and following the left branch in applications.
-- Walk down the spine collecting arguments (right branch of App) until
-- we reach a lambda or a variable.
-- If we reach a variable we have WHNF so reconsititute the App again.
-- If we reach a lambda we get to the crux of evaluation
-- We need a Beta-reduction
-- for App (Lam v e) a then v[e:=a]
-- we use the `subst` function for this
whnf :: Expr -> Expr
whnf ee = spine ee []
where
spine (App f a) as = spine f (a:as)
spine (Lam s _ e) (a:as) = spine (subst s a e) as
spine f as = foldl App f as
-- Free variables are those that occur in an Expr, but are not bound
-- within that Expr. Collect them in a list.
freeVars :: Expr -> [Sym]
freeVars (Var s) = [s]
freeVars (App f a) = freeVars f `union` freeVars a
freeVars (Lam i _ e) = freeVars e \\ [i]
-- Now for substitution
-- subst b[v:=x]
subst :: Sym -> Expr -> Expr -> Expr
subst v x b = sub b
where
sub e@(Var i) = if i == v then x else e
sub (App f a) = App (sub f) (sub a)
sub (Lam i t e) =
if v == i then Lam i t e
else if i `elem` fvx then
let
i' = cloneSym e i
e' = substVar i i' e
in Lam i' t (sub e')
else Lam i t (sub e)
fvx = freeVars x
cloneSym e i = loop i
where
loop i' = if i' `elem` vars then loop (i <> "'") else i'
vars = fvx <> freeVars e
substVar :: Sym -> Sym -> Expr -> Expr
substVar s s' e = subst s (Var s') e
-- For comparing Exprs without alpha-conversions
alphaEq :: Expr -> Expr -> Bool
alphaEq (Var v) (Var v') = v == v'
alphaEq (App f a) (App f' a') = alphaEq f f' && alphaEq a a'
alphaEq (Lam s _ e) (Lam s' _ e') = alphaEq e (substVar s' s e')
alphaEq _ _ = False
-- Reduction to Normal Form (where no redexes remain)
nf :: Expr -> Expr
nf ee = spine ee []
where
spine (App f a) as = spine f (a:as)
spine (Lam s t e) [] = Lam s t (nf e)
spine (Lam s _ e) (a:as) = spine (subst s a e) as
spine f as = app f as
app f as = foldl App f (map nf as)
betaEq :: Expr -> Expr -> Bool
betaEq e1 e2 = alphaEq (nf e1) (nf e2)
-- Testing !
--[z,s,m,n] = map (Var . (:[])) "zsmn"
--app2 f x y = App (App f x) y
--zero = Lam "s" $ Lam "z" z
--one = Lam "s" $ Lam "z" $ App s z
--two = Lam "s" $ Lam "z" $ App s $ App s z
--three = Lam "s" $ Lam "z" $ App s $ App s $ App s z
--plus = Lam "m" $ Lam "n" $ Lam "s" $ Lam "z" $ app2 m s (app2 n s z)
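-- Illustrative example (added; 'exampleId' is not part of the original
-- module): the simply typed identity function on the base type.
exampleId :: Expr
exampleId = Lam "x" Base (Var "x")
-- typeCheck exampleId            evaluates to  Arrow Base Base
-- whnf (App exampleId (Var "y")) evaluates to  Var "y"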
|
mankyKitty/TypePlay
|
src/TypePlay/Check/SimplyTyped.hs
|
Haskell
|
mit
| 4,823
|
{-# LANGUAGE OverloadedStrings #-}
import System.Environment
import Globals
import ProcessChain
main :: IO ()
main = do
args <- getArgs
case args of
[] -> statsOn gDefaultUniverse
u:_ -> statsOn u
|
mihaimaruseac/petulant-octo-avenger
|
src/Main.hs
|
Haskell
|
mit
| 212
|
{-
This file handles the passing of parameters and manages things to do with
the fields within a header comment.
Author(s): Lewis Deane
License: MIT
Last Modified: 20/7/2016
-}
module FieldTools (getFields) where
import Control.Applicative
import Data.List (isPrefixOf, sortBy, elemIndex)
import Data.List.Split (splitOn)
import Data.String.Utils (replace)
import Data.Time.Calendar
import Data.Time.Clock
import qualified Config as C
data Field = Author | Documentation | Email | License | LastModified | Maintainer | Website deriving (Show, Eq)
data State = Visible | Hidden | Custom String deriving (Show, Eq)
data FieldState = FieldState { field :: Field, state :: State } deriving (Show)
type Params = [String]
-- Define the order in which we want the various fields to appear in.
fieldOrder :: [Field]
fieldOrder = [Author, Maintainer, Email, Website, License, Documentation, LastModified]
-- Sort the field order by the order in which we defined above.
sortFields :: [FieldState] -> [FieldState]
sortFields x = sortBy f x
where f a b = compare (k a) (k b)
k y = (field y) `elemIndex` fieldOrder
-- Parse the parameters passed to the function so we can work out what we want with each field.
parseParams :: Params -> [FieldState]
parseParams [] = []
parseParams (x:xs) = if "-" `isPrefixOf` x
then FieldState { field = field', state = Hidden } : (parseParams xs)
else if length xs > 0
then if (f . head) xs
then FieldState { field = field', state = Custom (head xs)} : (parseParams (tail xs))
else FieldState { field = field', state = Visible} : (parseParams xs)
else FieldState { field = field', state = Visible } : (parseParams xs)
where field' = (getFieldFromShortcut . tail) x
f a = not $ any (\x -> x `isPrefixOf` a) ["-", "+"]
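-- Illustrative example (added, not part of the original module): the argument
-- list below marks the author visible, hides the email and gives the license
-- a custom value.
exampleParsed :: [FieldState]
exampleParsed = parseParams ["+a", "-e", "+l", "MIT"]
-- evaluates to [ FieldState {field = Author, state = Visible}
--              , FieldState {field = Email, state = Hidden}
--              , FieldState {field = License, state = Custom "MIT"} ]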
-- Merges the default fields with the parameters we have just passed.
mergeWithDefaultFields :: [FieldState] -> IO [FieldState]
mergeWithDefaultFields fields = do
def <- map (\x -> FieldState {field = x, state = Visible}) <$> getDefaultFields
(return . sortFields) $ merge def fields
-- Define what we want to happen when we are merging fields.
merge :: [FieldState] -> [FieldState] -> [FieldState]
merge def fields = foldl f def fields
where f list e = if any (\z -> field z == field e) list then map (m e) list else e : list
m new old = if field new == field old
then new
else old
-- Define what value we want respective fields to take.
fieldValue :: Field -> IO String
fieldValue x | x == Author = author
| x == Documentation = doc
| x == Email = email
| x == Maintainer = maintainer
| x == License = license
| x == LastModified = date
| x == Website = website
| otherwise = error "No such field."
-- Define what title we want respective fields to have.
fieldTitle :: Field -> String
fieldTitle x | x == Author = "Author(s)"
| x == Documentation = "Documentation"
| x == Email = "Email"
| x == Maintainer = "Maintainer(s)"
| x == License = "License"
| x == LastModified = "Last Modified"
| x == Website = "Website"
| otherwise = error "No such field."
-- Gets the default fields.
getDefaultFields :: IO [Field]
getDefaultFields = (map getFieldFromShortcut . splitOn ",") <$> C.readValue "default-fields"
-- Gets the appropriate field from the shortcut.
getFieldFromShortcut :: String -> Field
getFieldFromShortcut x | x == "a" = Author
| x == "d" = Documentation
| x == "e" = Email
| x == "m" = Maintainer
| x == "l" = License
| x == "lm" = LastModified
| x == "w" = Website
| otherwise = error $ x ++ " is not a valid field."
-- Returns a list of fields that we want after filtering out ones not wanted.
getFields :: Params -> IO [(String, String)]
getFields fields = do
merged <- (mergeWithDefaultFields . parseParams) fields
let merged' = filter (not . isHidden) merged
isHidden = (\x -> state x == Hidden)
sequence $ map extractValue merged'
-- Pairs a field with it value. Uses the default value unless the state is 'Custom', then we take the custom value.
extractValue :: FieldState -> IO (String, String)
extractValue FieldState { field = a, state = b } = if isCustom b
then return $ (fieldTitle a, getCustomValue b)
else fieldValue a >>= (return . (\y -> (fieldTitle a, y)))
-- Decides if the state passed in is 'Custom' or not.
isCustom :: State -> Bool
isCustom (Custom _) = True
isCustom _ = False
-- Gets the value from inside the 'Custom' wrapper.
getCustomValue :: State -> String
getCustomValue (Custom x) = x
getCustomValue _ = error "Custom state not passed to function."
-- Gets the value associated with the 'author' key in the config file.
author :: IO String
author = C.readValue "author"
-- Gets the value associated with the 'documentation' key in the config file.
doc :: IO String
doc = C.readValue "doc"
-- Gets the value associated with the 'email' key in the config file.
email :: IO String
email = C.readValue "email"
-- Gets the value associated with the 'maintainer' key in the config file.
maintainer :: IO String
maintainer = C.readValue "maintainer"
-- Gets the value associated with the 'license' key in the config file.
license :: IO String
license = C.readValue "license"
-- Gets the value associated with the 'website' key in the config file.
website :: IO String
website = C.readValue "website"
-- Gets the value associated with the 'comment-width' key in the config file.
commentWidth :: IO String
commentWidth = C.readValue "comment-width"
-- Gets todays date in the format the user has specificied in the config file.
date :: IO String
date = do
ct <- getCurrentTime
dformat <- C.readValue "date-format"
let f (y, m, d) = (replace "dd" (show d) . replace "mm" (show m) . replace "yy" ((drop 2 . show) y) . replace "yyyy" (show y)) dformat
(return . f . toGregorian . utctDay) ct
|
lewisjdeane/Comet
|
FieldTools.hs
|
Haskell
|
mit
| 7,335
|
-- |
-- The types and functions are trivial and self-descriptive,
-- hence this sentence is the sole documentation you get on them.
module Success.Pure
(
Success,
-- * Creation
nothing,
failure,
success,
-- * Execution
asEither,
asMaybe,
)
where
import Prelude
import Data.Foldable
import Data.Traversable
import Control.Applicative
import Control.Monad
import Control.Monad.Error.Class
newtype Success a b =
Success (Either (Maybe a) b)
deriving (Functor, Applicative, Monad, MonadError (Maybe a), Show, Foldable, Traversable)
instance Alternative (Success a) where
{-# INLINE empty #-}
empty =
Success (Left Nothing)
{-# INLINE (<|>) #-}
(<|>) =
\case
Success (Right x) -> const (Success (Right x))
Success (Left _) -> id
instance MonadPlus (Success a) where
{-# INLINE mzero #-}
mzero =
empty
{-# INLINE mplus #-}
mplus =
(<|>)
{-# INLINE nothing #-}
nothing :: Success a b
nothing =
Success (Left Nothing)
{-# INLINE failure #-}
failure :: a -> Success a b
failure failure =
Success (Left (Just failure))
{-# INLINE success #-}
success :: b -> Success a b
success =
pure
{-# INLINE asEither #-}
asEither :: Success a b -> Either (Maybe a) b
asEither (Success x) =
x
{-# INLINE asMaybe #-}
asMaybe :: Success a b -> Maybe b
asMaybe (Success x) =
case x of
Left _ -> Nothing
Right x -> Just x
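-- Illustrative example (added; not part of the original module): the
-- 'Alternative' instance keeps the first success and otherwise falls through.
exampleFirstSuccess :: Maybe Int
exampleFirstSuccess = asMaybe (failure "boom" <|> success 42)
-- evaluates to Just 42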
|
nikita-volkov/success
|
library/Success/Pure.hs
|
Haskell
|
mit
| 1,387
|
{-# LANGUAGE OverloadedStrings #-}
module Static (mkEmbedded) where
import Crypto.Hash.MD5 (hashlazy)
import qualified Data.ByteString.Base64 as B64
import qualified Data.ByteString.Lazy as BL
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import Network.Mime (MimeType)
import System.FilePath ((</>))
import WaiAppStatic.Storage.Embedded
hash :: BL.ByteString -> T.Text
hash = T.take 8 . T.decodeUtf8 . B64.encode . hashlazy
staticFiles :: [(FilePath, MimeType)]
staticFiles = [
("index.html" , "text/html")
, ("main.js" , "application/x-javascript")
, ("html-sanitizer.js" , "application/x-javascript")
, ("ext.js" , "application/x-javascript")
, ("FileSaver.js" , "application/x-javascript")
, ("main.css" , "text/css")
]
embedFile :: (FilePath, MimeType) -> IO EmbeddableEntry
embedFile (file, mime) = do
f <- BL.readFile $ "static" </> file
return $ EmbeddableEntry {
eLocation = T.pack file
, eMimeType = mime
, eContent = Left (hash f, f)
}
mkEmbedded :: IO [EmbeddableEntry]
mkEmbedded = mapM embedFile staticFiles
|
bitemyapp/hterm
|
Static.hs
|
Haskell
|
mit
| 1,326
|