code stringlengths 5 1.03M | repo_name stringlengths 5 90 | path stringlengths 4 158 | license stringclasses 15 values | size int64 5 1.03M | n_ast_errors int64 0 53.9k | ast_max_depth int64 2 4.17k | n_whitespaces int64 0 365k | n_ast_nodes int64 3 317k | n_ast_terminals int64 1 171k | n_ast_nonterminals int64 1 146k | loc int64 -1 37.3k | cycloplexity int64 -1 1.31k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
module Parser.ParseDec
(parseDec)
where
import Data.Maybe (fromMaybe)
import qualified Data.Set as Set
import Data.Char (isAlpha, isDigit)
import Control.Monad (guard)
import Parser.Syntax
import Parser.Parse
import Parser.ParseSpace
import Parser.ParseIdent
import Parser.ParseType
import Parser.ParseStat
import Parser.ParseExpr
-- | Parse an optional access modifier: the keyword "publ" (followed by
-- whitespace) means 'Public'; otherwise no input is consumed and the
-- access defaults to 'Private'.
parseAccessModifier :: Parse Char Access
parseAccessModifier = greedy $ parseEither publicModifier (return Private)
  where
    publicModifier = do
      lits "publ"
      kspace
      return Public
-- | Parse an access modifier that may also be "extd" (extendable).
-- Defaults to private ('NoExtend Private') when neither keyword is present.
parseExtendableAccessModifier :: Parse Char ExtendableAccess
parseExtendableAccessModifier = greedy $ choice
  [keyword "publ" (NoExtend Public)
  ,keyword "extd" Extend
  ,return (NoExtend Private)]
  where
    -- A keyword must be followed by whitespace to count.
    keyword word result = lits word >> kspace >> return result
-- Result of parsing a "publ" / "publ (priv ...)" modifier: public with a
-- (possibly empty) set of excluded names, or plain private.
data ComplexAccessModifier = PublicWithExclusions (Set.Set String) | PrivateComplex
-- | Parse one name inside a @publ (priv ...)@ exclusion list: a run of
-- letters, digits and underscores.
parseAccessExclusionOption :: Parse Char String
parseAccessExclusionOption = greedy $ many identifierChar
  where
    identifierChar = choice [litCond isAlpha, litCond isDigit, lit '_']
-- Parse "publ", optionally followed by a parenthesized "(priv a, b, ...)"
-- exclusion list; absence of "publ" means private.
parseComplexAccessModifier :: Parse Char ComplexAccessModifier
parseComplexAccessModifier = greedy $ parseEither public (return PrivateComplex) where
  public = do
    lits "publ"
    -- Either an explicit exclusion list, or just trailing whitespace and
    -- no exclusions.
    exclusions <- parseEither parseExclusions (kspace >> return Set.empty)
    optional kspace
    return $ PublicWithExclusions exclusions
  parseExclusions = do
    optional kspace
    lit '('
    optional kspace
    lits "priv"
    kspace
    options <- kcommaSeparated $ parseAccessExclusionOption
    -- An empty "(priv)" exclusion list is rejected.
    guard $ not $ null options
    optional kspace
    lit ')'
    return $ Set.fromList options
-- | Parse a runtime argument definition: an optional '#' marks a named
-- (rather than positional) argument, then "name : type".
parseRuntimeArgDef :: Parse Char ArgumentDef
parseRuntimeArgDef = greedy $ do
  argMode <- parseEither
    (lit '#' >> optional kspace >> return NamedArg)
    (return PositionalArg)
  argName <- parseLocalIdent
  optional kspace
  lit ':'
  optional kspace
  argType <- parseType
  return $ RuntimeArgumentDef argMode argName argType
-- Parse a compile-time argument: "static", optionally "#name :", then a
-- type. The name is absent for anonymous static arguments.
parseStaticArgDef :: Parse Char ArgumentDef
parseStaticArgDef = greedy $ do
  lits "static"
  kspace
  name <- optional $ do
    lit '#'
    optional kspace
    name <- parseLocalIdent
    optional kspace
    lit ':'
    optional kspace
    return name
  t <- parseType
  return $ StaticArgumentDef name t
-- Parse either kind of argument definition (runtime first, then static).
parseArgDef :: Parse Char ArgumentDef
parseArgDef = parseEither parseRuntimeArgDef parseStaticArgDef
-- Parse a parenthesized, comma-separated list of argument definitions.
parseArgDefList :: Parse Char [ArgumentDef]
parseArgDefList = kparenthesized parseArgDef
-- Parse an optional "-> Type" return annotation; defaults to 'VoidType'
-- when absent.
parseRetType :: Parse Char Type
parseRetType = fmap (fromMaybe VoidType) $ greedy $ optional $ do
  optional kspace
  lits "->"
  optional kspace
  parseType
-- Parse a function declaration:
--   [publ|extd] func name(args) [-> type] { body }
parseFuncDec :: Parse Char TemplatizableDec
parseFuncDec = greedy $ do
  access <- parseExtendableAccessModifier
  lits "func"
  kspace
  name <- parseUnresolvedIdent
  optional kspace
  args <- parseArgDefList
  retType <- parseRetType
  optional kspace
  body <- parseBody
  return $ FuncDec access name args retType body
-- Parse a parenthesized "(name : type)" argument, as used for
-- getter/setter receivers and for a setter's new-value argument.
parseSpecialArg :: Parse Char SpecialArgument
parseSpecialArg = greedy $ do
  lit '('
  optional kspace
  name <- parseLocalIdent
  optional kspace
  lit ':'
  optional kspace
  t <- parseType
  optional kspace
  lit ')'
  return $ SpecialArgument name t
-- Parse a getter declaration:
--   [access] getter (recv : T).name [(args)] -> type { body }
parseGetterDec :: Parse Char TemplatizableDec
parseGetterDec = greedy $ do
  access <- parseExtendableAccessModifier
  lits "getter"
  optional kspace
  receiver <- parseSpecialArg
  optional kspace
  lit '.'
  optional kspace
  name <- parseUnresolvedIdent
  -- The argument list is optional; a getter without one takes no extra args.
  args <- fmap (fromMaybe []) $ optional (optional kspace >> parseArgDefList)
  optional kspace
  lits "->"
  optional kspace
  t <- parseType
  optional kspace
  body <- parseBody
  return $ GetterDec access name receiver args t body
-- Parse the setter mode keyword, "constr" (constructive) or "destr"
-- (destructive), followed by mandatory whitespace.
parseSetterMode :: Parse Char SetterMode
parseSetterMode = greedy $ do
  mode <- parseEither
    (lits "constr" >> return ConstructiveSetter)
    (lits "destr" >> return DestructiveSetter)
  kspace
  return mode
-- Parse a setter declaration:
--   [access] (constr|destr) setter (recv : T).name [(args)] = (new : T) { body }
parseSetterDec :: Parse Char TemplatizableDec
parseSetterDec = greedy $ do
  access <- parseExtendableAccessModifier
  setterMode <- parseSetterMode
  lits "setter"
  optional kspace
  receiver <- parseSpecialArg
  optional kspace
  lit '.'
  optional kspace
  name <- parseUnresolvedIdent
  -- Optional extra argument list (beyond receiver and new value).
  args <- fmap (fromMaybe []) $ optional (optional kspace >> parseArgDefList)
  optional kspace
  lit '='
  optional kspace
  newValArg <- parseSpecialArg
  optional kspace
  body <- parseBody
  return $ SetterDec access setterMode name receiver args newValArg body
-- Parse a method declaration whose first argument is marked "dynamic":
--   method name(dynamic arg [, args...]) [-> type] { body }
parseMethodDec :: Parse Char TemplatizableDec
parseMethodDec = greedy $ do
  lits "method"
  kspace
  name <- parseUnresolvedIdent
  optional kspace
  lit '('
  optional kspace
  lits "dynamic"
  kspace
  dynArg <- parseArgDef
  -- Remaining (non-dynamic) arguments are optional.
  mainArgs <- fmap (fromMaybe []) $ optional $ greedy $ do
    optional kspace
    lit ','
    optional kspace
    kcommaSeparated parseArgDef
  optional kspace
  lit ')'
  retType <- parseRetType
  optional kspace
  body <- parseBody
  return $ MethodDec name dynArg mainArgs retType body
-- Parse the access modifier for a struct case. The only exclusion name
-- permitted here is "make": @publ (priv make)@ yields a public case with
-- a private constructor.
parseStructCaseAccess :: Parse Char StructCaseAccess
parseStructCaseAccess = greedy $ do
  rawOptions <- parseComplexAccessModifier
  case rawOptions of
    PublicWithExclusions exclusions -> do
      -- Only "make" may be excluded. This must be a genuine subset
      -- test: the previous code used (>=) on sets, but Set's Ord
      -- instance is a lexicographic order on the sorted elements, not
      -- the superset relation, so e.g. {"abc"} was wrongly accepted.
      guard $ exclusions `Set.isSubsetOf` Set.singleton "make"
      let constructorAccess = if Set.member "make" exclusions
            then PrivateConstructor
            else PublicConstructor
      return $ PublicCase constructorAccess
    PrivateComplex -> return PrivateCase
-- Parse a field access modifier, split into getter/setter access.
-- "publ (priv getter)" or "publ (priv setter)" privatizes one side; any
-- other non-empty exclusion set is a parse failure.
parseFieldAccess :: Parse Char (GetterAccess, SetterAccess)
parseFieldAccess = greedy $ do
  rawOptions <- parseComplexAccessModifier
  case rawOptions of
    PublicWithExclusions exclusions -> case Set.toList exclusions of
      ["getter"] -> return (PrivateGetter, PublicSetter)
      ["setter"] -> return (PublicGetter, PrivateSetter)
      [] -> return (PublicGetter, PublicSetter)
      _ -> parseFailure
    PrivateComplex -> return (PrivateGetter, PrivateSetter)
-- Split a list according to a classifying function, preserving the
-- relative order of elements on each side. Equivalent to
-- @Data.Either.partitionEithers . map f@; kept local to avoid an extra
-- import. This function should probably be refactored into some
-- utility file.
partitionWith :: (x -> Either a b) -> [x] -> ([a], [b])
partitionWith f = foldr classify ([], [])
  where
    -- Prepend each classified element to the appropriate result list.
    classify x (as, bs) = case f x of
      Left a  -> (a : as, bs)
      Right b -> (as, b : bs)
-- | Parse the tail of a field declaration: either an "= initializer"
-- expression or a ": type" annotation.
parseFieldContent :: Parse Char FieldContent
parseFieldContent = greedy $ parseEither initializerPart typePart
  where
    initializerPart = do
      lit '='
      optional kspace
      fmap FieldInitializer parseExpr
    typePart = do
      lit ':'
      optional kspace
      fmap FieldType parseType
-- Parse a struct field: access, "var"/"let" binding mode, name, then an
-- initializer or a type annotation, terminated by a semicolon.
parseField :: Parse Char Field
parseField = greedy $ do
  (getterAccess, setterAccess) <- parseFieldAccess
  mode <- parseEither (lits "var" >> return VarBinding) (lits "let" >> return LetBinding)
  kspace
  name <- parseLocalIdent
  optional kspace
  content <- parseFieldContent
  ksemicolon
  return $ Field getterAccess setterAccess mode name content
-- Parse a nested "case" of a struct. A case has either a full body in
-- braces or just a terminating semicolon (no fields, no sub-cases).
parseStructSubcase :: Parse Char (LocalIdent, StructCase)
parseStructSubcase = greedy $ do
  access <- parseStructCaseAccess
  lits "case"
  kspace
  name <- parseLocalIdent
  optional kspace
  (fields, subCases) <- parseEither
    parseStructCaseBody
    (ksemicolon >> return ([], []))
  return $ (name, StructCase access fields subCases)
-- | One element of a struct body: a field ('Left') or a nested case
-- ('Right').
parseStructElement :: Parse Char (Either Field (LocalIdent, StructCase))
parseStructElement = parseEither fieldElement subcaseElement
  where
    fieldElement   = fmap Left parseField
    subcaseElement = fmap Right parseStructSubcase
-- Parse a brace-delimited struct body and partition its elements into
-- fields and sub-cases.
parseStructCaseBody :: Parse Char ([Field], [(LocalIdent, StructCase)])
parseStructCaseBody = greedy $ do
  lit '{'
  elements <- greedyMany $ do
    optional kspace
    parseStructElement
  optional kspace
  lit '}'
  return $ partitionWith id elements
-- Parse a struct declaration, optionally "ref" (reference semantics) and
-- optionally templatized with one or more $-parameters.
parseStructDec :: Parse Char TemplatizableDec
parseStructDec = greedy $ do
  access <- parseStructCaseAccess
  mode <- parseEither (lits "ref" >> kspace >> return RefStruct) (return ValueStruct)
  lits "struct"
  kspace
  -- Parenthesized list, single bare parameter, or none.
  params <- greedy $ choice
    -- marked greedy so that "struct $AB {}" cannot parse as "struct $A B {}"
    [kparenthesized parseTemplateParam
    ,fmap (\p -> [p]) parseTemplateParam
    ,return []]
  optional kspace
  name <- parseLocalIdent
  optional kspace
  (fields, subCases) <- parseStructCaseBody
  return $ StructDec mode params name $ StructCase access fields subCases
-- Parse a protocol requirement for a function:
--   func name(arg interfaces) [-> type];
parseFuncReq :: Parse Char ProtocolRequirement
parseFuncReq = greedy $ do
  lits "func"
  kspace
  name <- parseUnresolvedIdent
  optional kspace
  args <- kparenthesized parseArgDefInterface
  retType <- parseRetType
  ksemicolon
  return $ FuncRequirement name args retType
-- | Parse an optional parenthesized argument-interface list; absence
-- means no arguments.
parseOptArgInterfaces :: Parse Char [ArgumentDefInterface]
parseOptArgInterfaces = fmap (fromMaybe []) $ optional $ do
  optional kspace
  kparenthesized parseArgDefInterface
-- Parse a protocol requirement for a getter:
--   getter (ReceiverType).name [(args)] -> type;
parseGetterReq :: Parse Char ProtocolRequirement
parseGetterReq = greedy $ do
  lits "getter"
  kspace
  lit '('
  optional kspace
  receiver <- parseType
  optional kspace
  lit ')'
  optional kspace
  lit '.'
  optional kspace
  name <- parseUnresolvedIdent
  args <- parseOptArgInterfaces
  optional kspace
  lits "->"
  optional kspace
  retType <- parseType
  ksemicolon
  return $ GetterRequirement name receiver args retType
-- Parse a protocol requirement for a setter:
--   [constr|destr] setter (ReceiverType).name [(args)] = type;
-- The mode is optional here (unlike in a setter declaration).
parseSetterReq :: Parse Char ProtocolRequirement
parseSetterReq = greedy $ do
  mode <- optional $ parseSetterMode
  lits "setter"
  kspace
  lit '('
  optional kspace
  receiver <- parseType
  optional kspace
  lit ')'
  optional kspace
  lit '.'
  optional kspace
  name <- parseUnresolvedIdent
  args <- parseOptArgInterfaces
  optional kspace
  lit '='
  optional kspace
  t <- parseType
  ksemicolon
  return $ SetterRequirement mode name receiver args t
-- Parse a requirement that another protocol holds:
--   protocol Name(type args);
parseExternalProtocolReq :: Parse Char ProtocolRequirement
parseExternalProtocolReq = greedy $ do
  lits "protocol"
  kspace
  name <- parseUnresolvedIdent
  optional kspace
  params <- kparenthesized parseType
  ksemicolon
  return $ ExternalProtocolRequirement name params
-- Parse any single protocol requirement.
parseProtocolReq :: Parse Char ProtocolRequirement
parseProtocolReq = greedy $ choice
  [parseFuncReq
  ,parseGetterReq
  ,parseSetterReq
  ,parseExternalProtocolReq]
-- Parse a brace-delimited list of protocol requirements.
parseProtocolReqs :: Parse Char [ProtocolRequirement]
parseProtocolReqs = greedy $ do
  lit '{'
  requirements <- many $ optional kspace >> parseProtocolReq
  optional kspace
  lit '}'
  return requirements
-- | Parse a template parameter: a '$'-prefixed local identifier.
parseTemplateParam :: Parse Char LocalIdent
parseTemplateParam = do
  lit '$'
  parseLocalIdent
-- Parse a protocol declaration:
--   [publ] protocol Name($A, $B, ...) { requirements }
parseProtocolDec :: Parse Char Dec
parseProtocolDec = greedy $ do
  access <- parseAccessModifier
  lits "protocol"
  kspace
  name <- parseLocalIdent
  optional kspace
  -- Reuse parseTemplateParam instead of re-spelling "lit '$' >>
  -- parseLocalIdent", so template-parameter syntax is defined in one place.
  params <- kparenthesized parseTemplateParam
  optional kspace
  requirements <- parseProtocolReqs
  return $ ProtocolDec access name params requirements
-- Parse an "open" declaration announcing an open func/getter/setter name:
--   open (func|getter|setter) name;
parseOpenDec :: Parse Char Dec
parseOpenDec = greedy $ do
  lits "open"
  kspace
  openType <- choice
    [lits "func" >> return OpenFunc
    ,lits "getter" >> return OpenGetter
    ,lits "setter" >> return OpenSetter]
  kspace
  name <- parseLocalIdent
  ksemicolon
  return $ OpenDec openType name
-- Parse any declaration form that may carry template requirements.
parseTemplatizableDec :: Parse Char TemplatizableDec
parseTemplatizableDec = choice
  [parseFuncDec
  ,parseStructDec
  ,parseGetterDec
  ,parseSetterDec
  ,parseMethodDec]
-- Parse an optional "given { requirements }" prefix followed by a
-- templatizable declaration; no prefix means no requirements.
parseTemplatizedDec :: Parse Char Dec
parseTemplatizedDec = greedy $ do
  requirements <- fmap (fromMaybe []) $ greedy $ optional $ do
    lits "given"
    optional kspace
    reqs <- parseProtocolReqs
    optional kspace
    return reqs
  dec <- parseTemplatizableDec
  return $ TemplatizedDec requirements dec
-- Top-level declaration parser: templatized declarations, protocols, or
-- "open" declarations.
parseDec :: Parse Char Dec
parseDec = choice
  [parseTemplatizedDec
  ,parseProtocolDec
  ,parseOpenDec]
| Kiwi-Labs/KWICK | kwick/parser/ParseDec.hs | mit | 11,070 | 38 | 18 | 1,801 | 3,493 | 1,592 | 1,901 | 378 | 5 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
module Database.Persist.TH.PersistWith.Model where
import TemplateTestImports
import Database.Persist.TH.PersistWith.Model2
-- Define the 'IceCream' entity with 'mkPersistWith', passing the
-- entities already discovered in scope (e.g. from Model2) so the
-- 'FlavorId' reference can resolve across modules.
mkPersistWith sqlSettings $(discoverEntities) [persistLowerCase|
IceCream
    flavor FlavorId
|]
| yesodweb/persistent | persistent/test/Database/Persist/TH/PersistWith/Model.hs | mit | 687 | 0 | 7 | 78 | 51 | 38 | 13 | 17 | 0 |
--
--
--
-----------------
-- Exercise 10.6.
-----------------
--
--
--
module E'10''6 where
-- ( A ):
-- ------
-- Squares every element of a list.
squares :: ( Num t , Ord t ) => [t] -> [t]
squares = map squareOf
  where
  -- Multiply a number by itself.
  squareOf :: ( Num t , Ord t ) => t -> t
  squareOf n = n * n
{- GHCi>
squares []
squares [ 0 , 1 ]
squares [ 1 , 2 ]
-}
-- []
-- [0,1]
-- [1,4]
-- ( B ):
-- ------
-- Sums the squares of the given numbers.
sumSquares :: ( Num t , Ord t ) => [t] -> t
sumSquares = sum . squares
{- GHCi>
sumSquares []
sumSquares [ 0 , 1 ]
sumSquares [ 1 , 2 ]
-}
-- 0
-- 1
-- 5
-- ( C ):
-- ------
-- True iff every number in the list is strictly positive.
arePositive :: ( Num t , Ord t ) => [t] -> Bool
arePositive = null . filter notPositive
  where
  -- Zero or negative numbers disqualify the list.
  notPositive :: ( Num t , Ord t ) => t -> Bool
  notPositive n = n <= 0
{- GHCi>
arePositive []
arePositive [ 0 , 1 ]
arePositive [ 1 , 2 ]
-}
-- True
-- False
-- True
-- Other solution for "arePositive":
-- True iff every number in the list is strictly positive
-- (variant expressed with 'all').
arePositive2 :: ( Num t , Ord t ) => [t] -> Bool
arePositive2 = all isPositive
  where
  isPositive :: ( Num t , Ord t ) => t -> Bool
  isPositive n = n > 0
| pascal-knodel/haskell-craft | _/links/E'10''6.hs | mit | 1,158 | 0 | 8 | 317 | 327 | 186 | 141 | 19 | 1 |
-----------------------------------------------------------------------------
--
-- Stg to C-- code generation: bindings
--
-- (c) The University of Glasgow 2004-2006
--
-----------------------------------------------------------------------------
{-# OPTIONS -fno-warn-tabs #-}
-- The above warning supression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details
module StgCmmBind (
cgTopRhsClosure,
cgBind,
emitBlackHoleCode,
pushUpdateFrame
) where
#include "HsVersions.h"
import StgCmmExpr
import StgCmmMonad
import StgCmmEnv
import StgCmmCon
import StgCmmHeap
import StgCmmProf
import StgCmmTicky
import StgCmmGran
import StgCmmLayout
import StgCmmUtils
import StgCmmClosure
import StgCmmForeign (emitPrimCall)
import MkGraph
import CoreSyn ( AltCon(..) )
import SMRep
import Cmm
import CmmUtils
import CLabel
import StgSyn
import CostCentre
import Id
import Control.Monad
import Name
import Module
import ListSetOps
import Util
import BasicTypes
import Constants
import Outputable
import FastString
import Maybes
import DynFlags
import StaticFlags
------------------------------------------------------------------------
-- Top-level bindings
------------------------------------------------------------------------
-- For closures bound at top level, allocate in static space.
-- They should have no free variables.
-- | Generate code and a static closure for a top-level closure binding.
-- Top-level closures are allocated statically and have no free variables
-- (note the empty free-var lists passed below).
cgTopRhsClosure :: Id
                -> CostCentreStack      -- Optional cost centre annotation
                -> StgBinderInfo
                -> UpdateFlag
                -> SRT
                -> [Id]                 -- Args
                -> StgExpr
                -> FCode CgIdInfo

cgTopRhsClosure id ccs _ upd_flag srt args body = do
  {     -- LAY OUT THE OBJECT
    let name = idName id
  ; lf_info  <- mkClosureLFInfo id TopLevel [] upd_flag args
  ; srt_info <- getSRTInfo srt
  ; mod_name <- getModuleName
  ; let descr         = closureDescription mod_name name
        closure_info  = mkClosureInfo True id lf_info 0 0 srt_info descr
        closure_label = mkLocalClosureLabel name (idCafInfo id)
        cg_id_info    = litIdInfo id lf_info (CmmLabel closure_label)
        caffy         = idCafInfo id
        info_tbl      = mkCmmInfo closure_info -- XXX short-cut
        closure_rep   = mkStaticClosureFields info_tbl ccs caffy []

        -- BUILD THE OBJECT, AND GENERATE INFO TABLE (IF NECESSARY)
  ; emitDataLits closure_label closure_rep
  ; let fv_details :: [(NonVoid Id, VirtualHpOffset)]
        (_, _, fv_details) = mkVirtHeapOffsets (isLFThunk lf_info)
                                               (addIdReps [])
        -- Don't drop the non-void args until the closure info has been made
  ; forkClosureBody (closureCodeBody True id closure_info ccs
                                     (nonVoidIds args) (length args) body fv_details)

  ; returnFC cg_id_info }
------------------------------------------------------------------------
-- Non-top-level bindings
------------------------------------------------------------------------
-- | Generate code for a (non-top-level) STG binding: add the bound
-- names to the environment and emit allocation/initialization code.
-- Recursive groups are handled with a fixpoint; see the long comment
-- following this definition.
cgBind :: StgBinding -> FCode ()
cgBind (StgNonRec name rhs)
  = do  { ((info, init), body) <- getCodeR $ cgRhs name rhs
        ; addBindC (cg_id info) info
        ; emit (init <*> body) }

cgBind (StgRec pairs)
  = do  { ((new_binds, inits), body) <- getCodeR $ fixC (\ new_binds_inits ->
               do { addBindsC $ fst new_binds_inits -- avoid premature deconstruction
                  ; liftM unzip $ listFCs [ cgRhs b e | (b,e) <- pairs ] })
        ; addBindsC new_binds
        ; emit (catAGraphs inits <*> body) }
{- Recursive let-bindings are tricky.
Consider the following pseudocode:
let x = \_ -> ... y ...
y = \_ -> ... z ...
z = \_ -> ... x ...
in ...
For each binding, we need to allocate a closure, and each closure must
capture the address of the other closures.
We want to generate the following C-- code:
// Initialization Code
x = hp - 24; // heap address of x's closure
y = hp - 40; // heap address of x's closure
z = hp - 64; // heap address of x's closure
// allocate and initialize x
m[hp-8] = ...
m[hp-16] = y // the closure for x captures y
m[hp-24] = x_info;
// allocate and initialize y
m[hp-32] = z; // the closure for y captures z
m[hp-40] = y_info;
// allocate and initialize z
...
For each closure, we must generate not only the code to allocate and
initialize the closure itself, but also some Initialization Code that
sets a variable holding the closure pointer.
The complication here is that we don't know the heap offsets a priori,
which has two consequences:
1. we need a fixpoint
2. we can't trivially separate the Initialization Code from the
code that compiles the right-hand-sides
Note: We don't need this complication with let-no-escapes, because
in that case, the names are bound to labels in the environment,
and we don't need to emit any code to witness that binding.
-}
--------------------
cgRhs :: Id -> StgRhs -> FCode (CgIdInfo, CmmAGraph)
        -- The Id is passed along so a binding can be set up
        -- The returned values are the binding for the environment
        -- and the Initialization Code that witnesses the binding

-- A constructor RHS is allocated (or shared) directly.
cgRhs name (StgRhsCon cc con args)
  = buildDynCon name cc con args

-- A closure RHS builds a heap closure capturing the non-void free vars.
cgRhs name (StgRhsClosure cc bi fvs upd_flag srt args body)
  = mkRhsClosure name cc bi (nonVoidIds fvs) upd_flag srt args body
------------------------------------------------------------------------
-- Non-constructor right hand sides
------------------------------------------------------------------------
-- | Generate allocation and entry code for a closure right-hand side,
-- specialising selector thunks and AP thunks to their standard forms.
mkRhsClosure :: Id -> CostCentreStack -> StgBinderInfo
             -> [NonVoid Id]                    -- Free vars
             -> UpdateFlag -> SRT
             -> [Id]                            -- Args
             -> StgExpr
             -> FCode (CgIdInfo, CmmAGraph)

{- mkRhsClosure looks for two special forms of the right-hand side:
        a) selector thunks
        b) AP thunks

If neither happens, it just calls mkClosureLFInfo.  You might think
that mkClosureLFInfo should do all this, but it seems wrong for the
latter to look at the structure of an expression

Note [Selectors]
~~~~~~~~~~~~~~~~
We look at the body of the closure to see if it's a selector---turgid,
but nothing deep.  We are looking for a closure of {\em exactly} the
form:

...  = [the_fv] \ u [] ->
        case the_fv of
          con a_1 ... a_n -> a_i

Note [Ap thunks]
~~~~~~~~~~~~~~~~
A more generic AP thunk of the form

        x = [ x_1...x_n ] \.. [] -> x_1 ... x_n

A set of these is compiled statically into the RTS, so we just use
those.  We could extend the idea to thunks where some of the x_i are
global ids (and hence not free variables), but this would entail
generating a larger thunk.  It might be an option for non-optimising
compilation, though.

We only generate an Ap thunk if all the free variables are pointers,
for semi-obvious reasons.
-}

---------- Note [Selectors] ------------------
mkRhsClosure    bndr cc bi
                [NonVoid the_fv]        -- Just one free var
                upd_flag                -- Updatable thunk
                _srt
                []                      -- A thunk
                body@(StgCase (StgApp scrutinee [{-no args-}])
                      _ _ _ _           -- ignore uniq, etc.
                      (AlgAlt _)
                      [(DataAlt _, params, _use_mask,
                            (StgApp selectee [{-no args-}]))])
  |  the_fv == scrutinee                -- Scrutinee is the only free variable
  && maybeToBool maybe_offset           -- Selectee is a component of the tuple
  && offset_into_int <= mAX_SPEC_SELECTEE_SIZE  -- Offset is small enough
  = -- NOT TRUE: ASSERT(is_single_constructor)
    -- The simplifier may have statically determined that the single alternative
    -- is the only possible case and eliminated the others, even if there are
    -- other constructors in the datatype.  It's still ok to make a selector
    -- thunk in this case, because we *know* which constructor the scrutinee
    -- will evaluate to.
    --
    -- srt is discarded; it must be empty
    cgStdThunk bndr cc bi body lf_info [StgVarArg the_fv]
  where
    lf_info = mkSelectorLFInfo bndr offset_into_int
                               (isUpdatable upd_flag)
    (_, _, params_w_offsets) = mkVirtConstrOffsets (addIdReps params)
                               -- Just want the layout
    maybe_offset    = assocMaybe params_w_offsets (NonVoid selectee)
    Just the_offset = maybe_offset
    -- Convert the virtual heap offset to a word offset past the header.
    offset_into_int = the_offset - fixedHdrSize

---------- Note [Ap thunks] ------------------
mkRhsClosure    bndr cc bi
                fvs
                upd_flag
                _srt
                []                      -- No args; a thunk
                body@(StgApp fun_id args)

  | args `lengthIs` (arity-1)
        && all (isGcPtrRep . idPrimRep . stripNV) fvs
        && isUpdatable upd_flag
        && arity <= mAX_SPEC_AP_SIZE
        && not opt_SccProfilingOn -- not when profiling: we don't want to
                                  -- lose information about this particular
                                  -- thunk (e.g. its type) (#949)

  -- Ha! an Ap thunk
  = cgStdThunk bndr cc bi body lf_info payload
  where
    lf_info = mkApLFInfo bndr upd_flag arity
    -- the payload has to be in the correct order, hence we can't
    -- just use the fvs.
    payload = StgVarArg fun_id : args
    arity   = length fvs

---------- Default case ------------------
mkRhsClosure bndr cc _ fvs upd_flag srt args body
  = do  {       -- LAY OUT THE OBJECT
        -- If the binder is itself a free variable, then don't store
        -- it in the closure.  Instead, just bind it to Node on entry.
        -- NB we can be sure that Node will point to it, because we
        -- haven't told mkClosureLFInfo about this; so if the binder
        -- _was_ a free var of its RHS, mkClosureLFInfo thinks it *is*
        -- stored in the closure itself, so it will make sure that
        -- Node points to it...
        ; let
            is_elem      = isIn "cgRhsClosure"
            bndr_is_a_fv = (NonVoid bndr) `is_elem` fvs
            reduced_fvs | bndr_is_a_fv = fvs `minusList` [NonVoid bndr]
                        | otherwise    = fvs

        -- MAKE CLOSURE INFO FOR THIS CLOSURE
        ; lf_info <- mkClosureLFInfo bndr NotTopLevel fvs upd_flag args
        ; mod_name <- getModuleName
        ; c_srt <- getSRTInfo srt
        ; let name  = idName bndr
              descr = closureDescription mod_name name
              fv_details :: [(NonVoid Id, VirtualHpOffset)]
              (tot_wds, ptr_wds, fv_details)
                 = mkVirtHeapOffsets (isLFThunk lf_info)
                                     (addIdReps (map stripNV reduced_fvs))
              closure_info = mkClosureInfo False      -- Not static
                                           bndr lf_info tot_wds ptr_wds
                                           c_srt descr

        -- BUILD ITS INFO TABLE AND CODE
        ; forkClosureBody $
            -- forkClosureBody: (a) ensure that bindings in here are not seen elsewhere
            --                  (b) ignore Sequel from context; use empty Sequel
            -- And compile the body
            closureCodeBody False bndr closure_info cc (nonVoidIds args)
                            (length args) body fv_details

        -- BUILD THE OBJECT
        -- ; (use_cc, blame_cc) <- chooseDynCostCentres cc args body
        ; let use_cc = curCCS; blame_cc = curCCS
        ; emit (mkComment $ mkFastString "calling allocDynClosure")
        ; let toVarArg (NonVoid a, off) = (NonVoid (StgVarArg a), off)
        ; let info_tbl = mkCmmInfo closure_info
        ; (tmp, init) <- allocDynClosure info_tbl lf_info use_cc blame_cc
                                         (map toVarArg fv_details)

        -- RETURN
        ; regIdInfo bndr lf_info tmp init }
-- Use with care; if used inappropriately, it could break invariants.
-- | Unwrap the 'NonVoid' evidence wrapper.
stripNV :: NonVoid a -> a
stripNV (NonVoid a) = a
-------------------------
-- | Build a standard-form (selector or AP) thunk: lay out the payload,
-- allocate the closure, and return the binding plus its init code.
cgStdThunk
        :: Id
        -> CostCentreStack      -- Optional cost centre annotation
        -> StgBinderInfo        -- XXX: not used??
        -> StgExpr
        -> LambdaFormInfo
        -> [StgArg]             -- payload
        -> FCode (CgIdInfo, CmmAGraph)

cgStdThunk bndr _cc _bndr_info _body lf_info payload
  = do  -- AHA!  A STANDARD-FORM THUNK
  {     -- LAY OUT THE OBJECT
    mod_name <- getModuleName
  ; let (tot_wds, ptr_wds, payload_w_offsets)
            = mkVirtHeapOffsets (isLFThunk lf_info) (addArgReps payload)

        descr = closureDescription mod_name (idName bndr)
        closure_info = mkClosureInfo False      -- Not static
                                     bndr lf_info tot_wds ptr_wds
                                     NoC_SRT    -- No SRT for a std-form closure
                                     descr

  -- ; (use_cc, blame_cc) <- chooseDynCostCentres cc [{- no args-}] body
  ; let use_cc = curCCS; blame_cc = curCCS

        -- BUILD THE OBJECT
  ; let info_tbl = mkCmmInfo closure_info
  ; (tmp, init) <- allocDynClosure info_tbl lf_info
                                   use_cc blame_cc payload_w_offsets

        -- RETURN
  ; regIdInfo bndr lf_info tmp init }
-- | Compute the 'LambdaFormInfo' for a closure: a thunk when there are
-- no arguments, otherwise a re-entrant function with an arg descriptor.
mkClosureLFInfo :: Id           -- The binder
                -> TopLevelFlag -- True of top level
                -> [NonVoid Id] -- Free vars
                -> UpdateFlag   -- Update flag
                -> [Id]         -- Args
                -> FCode LambdaFormInfo
mkClosureLFInfo bndr top fvs upd_flag args
  | null args = return (mkLFThunk (idType bndr) top (map stripNV fvs) upd_flag)
  | otherwise = do { arg_descr <- mkArgDescr (idName bndr) args
                   ; return (mkLFReEntrant top (map stripNV fvs) args arg_descr) }
------------------------------------------------------------------------
-- The code for closures}
------------------------------------------------------------------------
closureCodeBody :: Bool            -- whether this is a top-level binding
                -> Id              -- the closure's name
                -> ClosureInfo     -- Lots of information about this closure
                -> CostCentreStack -- Optional cost centre attached to closure
                -> [NonVoid Id]    -- incoming args to the closure
                -> Int             -- arity, including void args
                -> StgExpr
                -> [(NonVoid Id, VirtualHpOffset)] -- the closure's free vars
                -> FCode ()

{- There are two main cases for the code for closures.

* If there are *no arguments*, then the closure is a thunk, and not in
  normal form. So it should set up an update frame (if it is
  shared). NB: Thunks cannot have a primitive type!

* If there is *at least one* argument, then this closure is in
  normal form, so there is no need to set up an update frame.

  The Macros for GrAnSim are produced at the beginning of the
  argSatisfactionCheck (by calling fetchAndReschedule).
  There info if Node points to closure is available. -- HWL -}

closureCodeBody top_lvl bndr cl_info cc args arity body fv_details
  | null args -- No args i.e. thunk
              -- ('null' rather than 'length args == 0' avoids an
              -- unnecessary O(n) traversal of the argument list)
  = emitClosureProcAndInfoTable top_lvl bndr lf_info info_tbl [] $
      \(_, node, _) -> thunkCode cl_info fv_details cc node arity body
  where
    lf_info  = closureLFInfo cl_info
    info_tbl = mkCmmInfo cl_info

closureCodeBody top_lvl bndr cl_info _cc args arity body fv_details
  = ASSERT( length args > 0 )
    do  { -- Allocate the global ticky counter,
          -- and establish the ticky-counter
          -- label for this block
        ; dflags <- getDynFlags
        ; let platform = targetPlatform dflags
              ticky_ctr_lbl = closureRednCountsLabel platform cl_info
        ; emitTickyCounter cl_info (map stripNV args)
        ; setTickyCtrLabel ticky_ctr_lbl $ do

        ; let
             lf_info  = closureLFInfo cl_info
             info_tbl = mkCmmInfo cl_info

        -- Emit the main entry code
        ; emitClosureProcAndInfoTable top_lvl bndr lf_info info_tbl args $
            \(offset, node, arg_regs) -> do
                -- Emit slow-entry code (for entering a closure through a PAP)
                { mkSlowEntryCode cl_info arg_regs

                ; let lf_info = closureLFInfo cl_info
                      node_points = nodeMustPointToIt lf_info
                      node' = if node_points then Just node else Nothing
                ; tickyEnterFun cl_info
                ; whenC node_points (ldvEnterClosure cl_info)
                ; granYield arg_regs node_points

                -- Main payload
                ; entryHeapCheck cl_info offset node' arity arg_regs $ do
                  { fv_bindings <- mapM bind_fv fv_details
                  -- Load free vars out of closure *after*
                  -- heap check, to reduce live vars over check
                  ; if node_points then load_fvs node lf_info fv_bindings
                                   else return ()
                  ; cgExpr body }}
  }
-- A function closure pointer may be tagged, so we
-- must take it into account when accessing the free variables.
-- Rebind one free variable to a fresh local register, keeping its heap
-- offset so it can later be loaded from the closure.
bind_fv :: (NonVoid Id, VirtualHpOffset) -> FCode (LocalReg, WordOff)
bind_fv (id, off) = do { reg <- rebindToReg id; return (reg, off) }
-- Emit loads for each free variable from the closure pointed to by
-- @node@, compensating for the closure pointer's dynamic tag.
load_fvs :: LocalReg -> LambdaFormInfo -> [(LocalReg, WordOff)] -> FCode ()
load_fvs node lf_info = mapCs (\ (reg, off) ->
    emit $ mkTaggedObjectLoad reg node off tag)
  where tag = lfDynTag lf_info
-----------------------------------------
-- The "slow entry" code for a function. This entry point takes its
-- arguments on the stack. It loads the arguments into registers
-- according to the calling convention, and jumps to the function's
-- normal entry point. The function's closure is assumed to be in
-- R1/node.
--
-- The slow entry point is used for unknown calls: eg. stg_PAP_entry
mkSlowEntryCode :: ClosureInfo -> [LocalReg] -> FCode ()
-- If this function doesn't have a specialised ArgDescr, we need
-- to generate the function's arg bitmap and slow-entry code.
-- Here, we emit the slow-entry code.
mkSlowEntryCode _ [] = panic "entering a closure with no arguments?"
mkSlowEntryCode cl_info arg_regs -- function closure is already in `Node'
  | Just (_, ArgGen _) <- closureFunInfo cl_info
  = do dflags <- getDynFlags
       let platform = targetPlatform dflags
           slow_lbl = closureSlowEntryLabel  platform cl_info
           fast_lbl = closureLocalEntryLabel platform cl_info
           -- mkDirectJump does not clobber `Node' containing function closure
           jump = mkDirectJump (mkLblExpr fast_lbl)
                               (map (CmmReg . CmmLocal) arg_regs)
                               initUpdFrameOff
       emitProcWithConvention Slow CmmNonInfoTable slow_lbl arg_regs jump
  -- Functions with a specialised ArgDescr need no generated slow entry.
  | otherwise = return ()
-----------------------------------------
-- | Entry code for a thunk: heap check, optional eager blackholing,
-- update-frame setup, free-variable loads, then the thunk body.
thunkCode :: ClosureInfo -> [(NonVoid Id, VirtualHpOffset)] -> CostCentreStack
          -> LocalReg -> Int -> StgExpr -> FCode ()
thunkCode cl_info fv_details _cc node arity body
  = do { let node_points = nodeMustPointToIt (closureLFInfo cl_info)
             node'       = if node_points then Just node else Nothing
       ; tickyEnterThunk cl_info
       ; ldvEnterClosure cl_info -- NB: Node always points when profiling
       ; granThunk node_points

       -- Heap overflow check
       ; entryHeapCheck cl_info 0 node' arity [] $ do
       { -- Overwrite with black hole if necessary
         -- but *after* the heap-overflow check
       ; whenC (blackHoleOnEntry cl_info && node_points)
               (blackHoleIt cl_info)

         -- Push update frame
       ; setupUpdate cl_info node $
            -- We only enter cc after setting up update so
            -- that cc of enclosing scope will be recorded
            -- in update frame CAF/DICT functions will be
            -- subsumed by this enclosing cc
            do { enterCostCentreThunk (CmmReg nodeReg)
               ; let lf_info = closureLFInfo cl_info
               ; fv_bindings <- mapM bind_fv fv_details
               ; load_fvs node lf_info fv_bindings
               ; cgExpr body }}}
------------------------------------------------------------------------
-- Update and black-hole wrappers
------------------------------------------------------------------------
blackHoleIt :: ClosureInfo -> FCode ()
-- Only called for closures with no args
-- Node points to the closure
-- Delegates to emitBlackHoleCode, passing whether the closure is
-- single-entry (affects only ticky accounting there).
blackHoleIt closure_info = emitBlackHoleCode (closureSingleEntry closure_info)
-- | Overwrite the closure that Node points to with an eager blackhole,
-- when eager blackholing is enabled.
emitBlackHoleCode :: Bool -> FCode ()
emitBlackHoleCode is_single_entry = do
  dflags <- getDynFlags

  -- Eager blackholing is normally disabled, but can be turned on with
  -- -feager-blackholing.  When it is on, we replace the info pointer
  -- of the thunk with stg_EAGER_BLACKHOLE_info on entry.

  -- If we wanted to do eager blackholing with slop filling, we'd need
  -- to do it at the *end* of a basic block, otherwise we overwrite
  -- the free variables in the thunk that we still need.  We have a
  -- patch for this from Andy Cheadle, but not incorporated yet. --SDM
  -- [6/2004]
  --
  -- Previously, eager blackholing was enabled when ticky-ticky was
  -- on. But it didn't work, and it wasn't strictly necessary to bring
  -- back minimal ticky-ticky, so now EAGER_BLACKHOLING is
  -- unconditionally disabled. -- krc 1/2007

  -- Note the eager-blackholing check is here rather than in blackHoleOnEntry,
  -- because emitBlackHoleCode is called from CmmParse.

  let eager_blackholing =  not opt_SccProfilingOn
                        && dopt Opt_EagerBlackHoling dflags
             -- Profiling needs slop filling (to support LDV
             -- profiling), so currently eager blackholing doesn't
             -- work with profiling.

  whenC eager_blackholing $ do
    tickyBlackHole (not is_single_entry)
    -- Store the owning TSO in the blackhole's payload word, then (after
    -- a write barrier) swap in the blackhole info pointer.
    emit (mkStore (cmmOffsetW (CmmReg nodeReg) fixedHdrSize)
                  (CmmReg (CmmGlobal CurrentTSO)))
    emitPrimCall [] MO_WriteBarrier []
    emit (mkStore (CmmReg nodeReg) (CmmReg (CmmGlobal EagerBlackholeInfo)))
-- | Wrap the closure body with whatever update machinery it needs:
-- nothing for re-entrant closures, an update frame for updatable
-- dynamic thunks, and CAF blackholing/linking for static ones.
setupUpdate :: ClosureInfo -> LocalReg -> FCode () -> FCode ()
        -- Nota Bene: this function does not change Node (even if it's a CAF),
        -- so that the cost centre in the original closure can still be
        -- extracted by a subsequent enterCostCentre
setupUpdate closure_info node body
  | closureReEntrant closure_info
  = body
  | not (isStaticClosure closure_info)
  = if not (closureUpdReqd closure_info)
      then do tickyUpdateFrameOmitted; body
      else do
          tickyPushUpdateFrame
          --dflags <- getDynFlags
          let es = [CmmReg (CmmLocal node), mkLblExpr mkUpdInfoLabel]
          --if not opt_SccProfilingOn && dopt Opt_EagerBlackHoling dflags
          --  then pushUpdateFrame es body -- XXX black hole
          --  else pushUpdateFrame es body
          pushUpdateFrame es body
  | otherwise -- A static closure
  = do  { tickyUpdateBhCaf closure_info
        ; if closureUpdReqd closure_info
          then do       -- Blackhole the (updatable) CAF:
                { upd_closure <- link_caf True
                ; pushUpdateFrame [CmmReg (CmmLocal upd_closure),
                                     mkLblExpr mkUpdInfoLabel] body } -- XXX black hole
          else do {tickyUpdateFrameOmitted; body}
    }
-----------------------------------------------------------------------------
-- Setting up update frames
-- Push the update frame on the stack in the Entry area,
-- leaving room for the return address that is already
-- at the old end of the area.
-- | Store the update-frame fields into consecutive stack slots of the
-- Old call area, then run the body with the update-frame offset moved
-- past them. Each push advances the offset by the width of the stored
-- expression before storing, so fields stack on top of one another.
pushUpdateFrame :: [CmmExpr] -> FCode () -> FCode ()
pushUpdateFrame es body
  = do -- [EZY] I'm not sure if we need to special-case for BH too
       updfr <- getUpdFrameOff
       offset <- foldM push updfr es
       withUpdFrameOff offset body
     where push off e =
             do emit (mkStore (CmmStackSlot (CallArea Old) base) e)
                return base
             where base = off + widthInBytes (cmmExprWidth e)
-----------------------------------------------------------------------------
-- Entering a CAF
--
-- When a CAF is first entered, it creates a black hole in the heap,
-- and updates itself with an indirection to this new black hole.
--
-- We update the CAF with an indirection to a newly-allocated black
-- hole in the heap. We also set the blocking queue on the newly
-- allocated black hole to be empty.
--
-- Why do we make a black hole in the heap when we enter a CAF?
--
-- - for a generational garbage collector, which needs a fast
-- test for whether an updatee is in an old generation or not
--
-- - for the parallel system, which can implement updates more
-- easily if the updatee is always in the heap. (allegedly).
--
-- When debugging, we maintain a separate CAF list so we can tell when
-- a CAF has been garbage collected.
-- newCAF must be called before the itbl ptr is overwritten, since
-- newCAF records the old itbl ptr in order to do CAF reverting
-- (which Hugs needs to do in order that combined mode works right.)
--
-- ToDo [Feb 04] This entire link_caf nonsense could all be moved
-- into the "newCAF" RTS procedure, which we call anyway, including
-- the allocation of the black-hole indirection closure.
-- That way, code size would fall, the CAF-handling code would
-- be closer together, and the compiler wouldn't need to know
-- about off_indirectee etc.
link_caf :: Bool -- True <=> updatable, False <=> single-entry
         -> FCode LocalReg -- Returns amode for closure to be updated
-- To update a CAF we must allocate a black hole, link the CAF onto the
-- CAF list, then update the CAF to point to the fresh black hole.
-- This function returns the address of the black hole, so it can be
-- updated with the new value when available. The reason for all of this
-- is that we only want to update dynamic heap objects, not static ones,
-- so that generational GC is easier.
link_caf _is_upd = do
  { -- Alloc black hole specifying CC_HDR(Node) as the cost centre
  ; let use_cc = costCentreFrom (CmmReg nodeReg)
        blame_cc = use_cc
        tso = CmmReg (CmmGlobal CurrentTSO)
    -- The fresh black hole records the current TSO just past its header.
  ; (hp_rel, init) <- allocDynClosureCmm cafBlackHoleInfoTable mkLFBlackHole
                                         use_cc blame_cc [(tso,fixedHdrSize)]
  ; emit init
  -- Call the RTS function newCAF to add the CAF to the CafList
  -- so that the garbage collector can find them
  -- This must be done *before* the info table pointer is overwritten,
  -- because the old info table ptr is needed for reversion
  ; ret <- newTemp bWord
  ; emitRtsCallGen [(ret,NoHint)] rtsPackageId (fsLit "newCAF")
      [ (CmmReg (CmmGlobal BaseReg), AddrHint),
        (CmmReg nodeReg, AddrHint),
        (CmmReg (CmmLocal hp_rel), AddrHint) ]
      (Just [node]) False
  -- node is live, so save it.
  -- see Note [atomic CAF entry] in rts/sm/Storage.c
  -- newCAF returning 0 means another thread won the race to enter this
  -- CAF; in that case jump straight back into the closure Node points at.
  ; emit $ mkCmmIfThen
      (CmmMachOp mo_wordEq [ CmmReg (CmmLocal ret), CmmLit zeroCLit]) $
        -- re-enter R1. Doing this directly is slightly dodgy; we're
        -- assuming lots of things, like the stack pointer hasn't
        -- moved since we entered the CAF.
       let target = entryCode (closureInfoPtr (CmmReg nodeReg)) in
       mkJump target [] 0
  ; return hp_rel }
------------------------------------------------------------------------
-- Profiling
------------------------------------------------------------------------
-- For "global" data constructors the description is simply occurrence
-- name of the data constructor itself. Otherwise it is determined by
-- @closureDescription@ from the let binding information.
-- | Build the profiling description string @\<name\>@ for a closure.
-- External names are printed by 'ppr' (which already includes the module
-- qualifier); local names are qualified with the binding module by hand.
closureDescription :: Module -- Module
                   -> Name -- Id of closure binding
                   -> String
-- Not called for StgRhsCon which have global info tables built in
-- CgConTbls.lhs with a description generated from the data constructor
closureDescription mod_name name =
  showSDocDump (char '<' <> description <> char '>')
  -- showSDocDump, because we want to see the unique on the Name.
  where
    description
      | isExternalName name = ppr name
      | otherwise           = pprModule mod_name <> char '.' <> ppr name
-- showSDocDump, because we want to see the unique on the Name.
| mcmaniac/ghc | compiler/codeGen/StgCmmBind.hs | bsd-3-clause | 27,159 | 198 | 20 | 6,550 | 4,196 | 2,250 | 1,946 | -1 | -1 |
import Graphics.UI.Gtk
main :: IO ()
-- Build a small window: a text entry plus an info button in a row,
-- with a status bar underneath. Activating the entry pushes a
-- palindrome report onto the status bar; the button pops it again.
main= do
     initGUI
     window <- windowNew
     set window [windowTitle := "Text Entry", containerBorderWidth := 10]
     vb <- vBoxNew False 0
     containerAdd window vb
     hb <- hBoxNew False 0
     boxPackStart vb hb PackNatural 0
     txtfield <- entryNew
     boxPackStart hb txtfield PackNatural 5
     button <- buttonNewFromStock stockInfo
     boxPackStart hb button PackNatural 0
     txtstack <- statusbarNew
     boxPackStart vb txtstack PackNatural 0
     -- NB: this binding shadows Prelude.id; it is the statusbar
     -- context id used by both saveText and the pop handler below.
     id <- statusbarGetContextId txtstack "Line"
     widgetShowAll window
     -- Button stays disabled until the first message is pushed.
     widgetSetSensitivity button False
     onEntryActivate txtfield (saveText txtfield button txtstack id)
     onPressed button (statusbarPop txtstack id)
     onDestroy window mainQuit
     mainGUI
-- | Read the entry text, report on the status bar whether it equals its
-- own reverse (i.e. is a palindrome), and enable the pop button.
--
-- Fixes two GHC warnings in the original: the context-id parameter was
-- named @id@, shadowing 'Prelude.id', and the message id returned by
-- 'statusbarPush' was bound to an unused name.
saveText :: Entry -> Button -> Statusbar -> ContextId -> IO ()
saveText fld b stk ctx = do
    txt <- entryGetText fld
    let mesg | txt == reverse txt = "\"" ++ txt ++ "\"" ++
                                    " is equal to its reverse"
             | otherwise = "\"" ++ txt ++ "\"" ++
                           " is not equal to its reverse"
    widgetSetSensitivity b True
    -- The MessageId is not needed; popping is done by context id.
    _ <- statusbarPush stk ctx mesg
    return ()
| k0001/gtk2hs | docs/tutorial/Tutorial_Port/Example_Code/GtkChap4-6.hs | gpl-3.0 | 1,275 | 0 | 14 | 414 | 366 | 161 | 205 | 33 | 1 |
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE FlexibleContexts #-}
-- | All types.
module HIndent.Types
(Printer(..)
,PrintState(..)
,Extender(..)
,Style(..)
,Config(..)
,defaultConfig
,NodeInfo(..)
,ComInfo(..)
,ComInfoLocation(..)
) where
import Control.Applicative
import Control.Monad
import Control.Monad.State.Strict (MonadState(..),StateT)
import Control.Monad.Trans.Maybe
import Data.Data
import Data.Functor.Identity
import Data.Int (Int64)
import Data.Text (Text)
import Data.Text.Lazy.Builder (Builder)
import Language.Haskell.Exts.Comments
import Language.Haskell.Exts.Parser
import Language.Haskell.Exts.SrcLoc
-- | A pretty printing monad.
-- Runs over 'PrintState' and can fail/backtrack via the inner 'MaybeT';
-- the @s@ parameter is style-specific user state.
newtype Printer s a =
  Printer {runPrinter :: StateT (PrintState s) (MaybeT Identity) a}
  deriving (Applicative,Monad,Functor,MonadState (PrintState s),MonadPlus,Alternative)

-- | The state of the pretty printer.
data PrintState s =
  PrintState {psIndentLevel :: !Int64 -- ^ Current indentation level.
             ,psOutput :: !Builder -- ^ The current output.
             ,psNewline :: !Bool -- ^ Just outputted a newline?
             ,psColumn :: !Int64 -- ^ Current column.
             ,psLine :: !Int64 -- ^ Current line number.
             ,psUserState :: !s -- ^ User state.
             ,psExtenders :: ![Extender s] -- ^ Extenders.
             ,psConfig :: !Config -- ^ Config which styles may or may not pay attention to.
             ,psEolComment :: !Bool -- ^ An end of line comment has just been outputted.
             ,psInsideCase :: !Bool -- ^ Whether we're in a case statement, used for Rhs printing.
             ,psParseMode :: !ParseMode -- ^ Mode used to parse the original AST.
             -- NB: rank-2 field; any style's preprocessor must work in an
             -- arbitrary MonadState over this PrintState.
             ,psCommentPreprocessor :: forall m. MonadState (PrintState s) m => [Comment] -> m [Comment] -- ^ Preprocessor applied to comments on an AST before printing.
             }
-- | A printer extender. Takes as argument the user state that the
-- printer was run with, and the current node to print. Use
-- 'prettyNoExt' to fallback to the built-in printer.
data Extender s where
  -- | Handle one specific (Typeable) node type.
  Extender :: forall s a. (Typeable a) => (a -> Printer s ()) -> Extender s
  -- | Inspect any node; returning Nothing falls through to the default.
  CatchAll :: forall s. (forall a. Typeable a => s -> a -> Maybe (Printer s ())) -> Extender s

-- | A printer style.
-- Existential in @s@: each style carries its own private user state.
data Style =
  forall s. Style {styleName :: !Text -- ^ Name of the style, used in the commandline interface.
                  ,styleAuthor :: !Text -- ^ Author of the printer (as opposed to the author of the style).
                  ,styleDescription :: !Text -- ^ Description of the style.
                  ,styleInitialState :: !s -- ^ User state, if needed.
                  ,styleExtenders :: ![Extender s] -- ^ Extenders to the printer.
                  ,styleDefConfig :: !Config -- ^ Default config to use for this style.
                  ,styleCommentPreprocessor :: forall s' m. MonadState (PrintState s') m => [Comment] -> m [Comment] -- ^ Preprocessor to use for comments.
                  }

-- | Configurations shared among the different styles. Styles may pay
-- attention to or completely disregard this configuration.
data Config =
  Config {configMaxColumns :: !Int64 -- ^ Maximum columns to fit code into ideally.
         ,configIndentSpaces :: !Int64 -- ^ How many spaces to indent?
         ,configClearEmptyLines :: !Bool -- ^ Remove spaces on lines that are otherwise empty?
         }
-- | Style configuration used when a style supplies no overrides:
-- 80-column target, two-space indents, and lines that contain only
-- whitespace are left as they are.
defaultConfig :: Config
defaultConfig = Config
  { configMaxColumns = 80
  , configIndentSpaces = 2
  , configClearEmptyLines = False
  }
-- | Information for each node in the AST.
data NodeInfo =
  NodeInfo {nodeInfoSpan :: !SrcSpanInfo -- ^ Location info from the parser.
           ,nodeInfoComments :: ![ComInfo] -- ^ Comments which are attached to this node.
           }
  deriving (Typeable,Show,Data)

-- | Comment relative locations.
-- Whether a comment sits before or after the node it is attached to.
data ComInfoLocation = Before | After
  deriving (Show,Typeable,Data,Eq)

-- | Comment with some more info.
data ComInfo =
  ComInfo {comInfoComment :: !Comment -- ^ The normal comment type.
          ,comInfoLocation :: !(Maybe ComInfoLocation) -- ^ Where the comment lies relative to the node.
          }
  deriving (Show,Typeable,Data)
| lunaris/hindent | src/HIndent/Types.hs | bsd-3-clause | 4,362 | 0 | 15 | 1,028 | 788 | 479 | 309 | -1 | -1 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE PackageImports #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
module Network.HTTP.Download.Verified
( verifiedDownload
, recoveringHttp
, DownloadRequest(..)
, drRetryPolicyDefault
, HashCheck(..)
, CheckHexDigest(..)
, LengthCheck
, VerifiedDownloadException(..)
) where
import qualified Data.List as List
import qualified Data.ByteString as ByteString
import qualified Data.ByteString.Base64 as B64
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.List as CL
import qualified Data.Text as Text
import qualified Data.Text.Encoding as Text
import Control.Applicative
import Control.Monad
import Control.Monad.Catch
import Control.Monad.IO.Class
import Control.Monad.Logger (logDebug, MonadLogger)
import Control.Retry (recovering,limitRetries,RetryPolicy,constantDelay)
import Crypto.Hash
import Crypto.Hash.Conduit (sinkHash)
import Data.ByteArray as Mem (convert)
import Data.ByteArray.Encoding as Mem (convertToBase, Base(Base16))
import Data.ByteString (ByteString)
import Data.ByteString.Char8 (readInteger)
import Data.Conduit
import Data.Conduit.Binary (sourceHandle, sinkHandle)
import Data.Foldable (traverse_,for_)
import Data.Monoid
import Data.String
import Data.Text.Encoding (decodeUtf8With)
import Data.Text.Encoding.Error (lenientDecode)
import Data.Typeable (Typeable)
import GHC.IO.Exception (IOException(..),IOErrorType(..))
import Network.HTTP.Client (getUri, path)
import Network.HTTP.Simple (Request, HttpException, httpSink, getResponseHeaders)
import Network.HTTP.Types.Header (hContentLength, hContentMD5)
import Path
import Prelude -- Fix AMP warning
import System.Directory
import System.FilePath ((<.>))
import System.IO
-- | A request together with some checks to perform.
data DownloadRequest = DownloadRequest
    { drRequest :: Request -- ^ HTTP request to issue.
    , drHashChecks :: [HashCheck] -- ^ Digests the body must match.
    , drLengthCheck :: Maybe LengthCheck -- ^ Expected byte count, if known.
    , drRetryPolicy :: RetryPolicy -- ^ How to retry on transient failures.
    }
-- | Default to retrying thrice with a short constant delay.
drRetryPolicyDefault :: RetryPolicy
drRetryPolicyDefault = limitRetries 3 <> constantDelay delayMicros
  where delayMicros = 100000 -- 'constantDelay' takes microseconds: 100 ms
-- | One digest the downloaded body must match; existential in the
-- hash algorithm so several algorithms can be checked side by side.
data HashCheck = forall a. (Show a, HashAlgorithm a) => HashCheck
  { hashCheckAlgorithm :: a -- ^ e.g. MD5, SHA256.
  , hashCheckHexDigest :: CheckHexDigest -- ^ Expected digest.
  }
deriving instance Show HashCheck

-- | The different encodings an expected digest may arrive in.
data CheckHexDigest
  = CheckHexDigestString String
  | CheckHexDigestByteString ByteString
  | CheckHexDigestHeader ByteString -- ^ Raw header value, base64 per RFC 2616.
  deriving Show
instance IsString CheckHexDigest where
  fromString = CheckHexDigestString

-- | Expected number of bytes in the body.
type LengthCheck = Int
-- | An exception regarding verification of a download.
-- Each constructor carries the request plus expected/actual values.
data VerifiedDownloadException
    = WrongContentLength
          Request
          Int -- expected
          ByteString -- actual (as listed in the header)
    | WrongStreamLength
          Request
          Int -- expected
          Int -- actual
    | WrongDigest
          Request
          String -- algorithm
          CheckHexDigest -- expected
          String -- actual (shown)
    deriving (Typeable)
instance Show VerifiedDownloadException where
    show (WrongContentLength req expected actual) =
        expectationFailure req "ContentLength header"
            (show expected) (displayByteString actual)
    show (WrongStreamLength req expected actual) =
        expectationFailure req "download size"
            (show expected) (show actual)
    show (WrongDigest req algo expected actual) =
        expectationFailure req ("content hash (" ++ algo ++ ")")
            (displayCheckHexDigest expected) (show actual)

-- | Shared layout for the three expectation-failure messages above.
expectationFailure :: Request -> String -> String -> String -> String
expectationFailure req what expected actual = concat
    [ "Download expectation failure: ", what, "\n"
    , "Expected: ", expected, "\n"
    , "Actual: ", actual, "\n"
    , "For: ", show (getUri req)
    ]

instance Exception VerifiedDownloadException
-- This exception is always caught and never thrown outside of this module.
-- It signals that an already-downloaded file fails the size check, which
-- simply triggers a re-download.
data VerifyFileException
    = WrongFileSize
          Int -- expected
          Integer -- actual (as listed by hFileSize)
    deriving (Show, Typeable)
instance Exception VerifyFileException
-- Render a ByteString that is known to be UTF8 encoded, with
-- surrounding whitespace trimmed.
displayByteString :: ByteString -> String
displayByteString bs = Text.unpack (Text.strip (Text.decodeUtf8 bs))

-- Render a CheckHexDigest in human-readable format, tagging which
-- representation it arrived in.
displayCheckHexDigest :: CheckHexDigest -> String
displayCheckHexDigest chd = case chd of
    CheckHexDigestString s     -> s ++ " (String)"
    CheckHexDigestByteString s -> displayByteString s ++ " (ByteString)"
    CheckHexDigestHeader h     ->
        show (B64.decodeLenient h) ++ " (Header. unencoded: " ++ show h ++ ")"
-- | Make sure that the hash digest for a finite stream of bytes
-- is as expected.
--
-- Throws WrongDigest (VerifiedDownloadException)
sinkCheckHash :: MonadThrow m
    => Request
    -> HashCheck
    -> Consumer ByteString m ()
sinkCheckHash req HashCheck{..} = do
    digest <- sinkHashUsing hashCheckAlgorithm
    let actualDigestString = show digest
    let actualDigestHexByteString = Mem.convertToBase Mem.Base16 digest
    let actualDigestBytes = Mem.convert digest
    -- The expected digest may be given as a hex String, a hex
    -- ByteString, or a base64-encoded header value; compare in the
    -- matching representation.
    let passedCheck = case hashCheckHexDigest of
            CheckHexDigestString s -> s == actualDigestString
            CheckHexDigestByteString b -> b == actualDigestHexByteString
            CheckHexDigestHeader b -> B64.decodeLenient b == actualDigestHexByteString
              || B64.decodeLenient b == actualDigestBytes
              -- A hack to allow hackage tarballs to download.
              -- They should really base64-encode their md5 header as per rfc2616#sec14.15.
              -- https://github.com/commercialhaskell/stack/issues/240
              || b == actualDigestHexByteString
    unless passedCheck $
        throwM $ WrongDigest req (show hashCheckAlgorithm) hashCheckHexDigest actualDigestString
-- Consume the whole stream, counting bytes, and throw
-- 'WrongStreamLength' if the total differs from the expectation.
assertLengthSink :: MonadThrow m
    => Request
    -> LengthCheck
    -> ZipSink ByteString m ()
assertLengthSink req expectedStreamLength = ZipSink $ do
    actualStreamLength <- getSum <$> CL.foldMap (Sum . ByteString.length)
    unless (actualStreamLength == expectedStreamLength) $
        throwM $ WrongStreamLength req expectedStreamLength actualStreamLength
-- | A more explicitly type-guided sinkHash.
-- The (ignored) first argument pins down the hash algorithm @a@ that
-- 'sinkHash' should compute, avoiding ambiguous-type annotations.
sinkHashUsing :: (Monad m, HashAlgorithm a) => a -> Consumer ByteString m (Digest a)
sinkHashUsing _ = sinkHash

-- | Turns a list of hash checks into a ZipSink that checks all of them.
-- All checks consume the same stream in parallel via 'ZipSink'.
hashChecksToZipSink :: MonadThrow m => Request -> [HashCheck] -> ZipSink ByteString m ()
hashChecksToZipSink req = traverse_ (ZipSink . sinkCheckHash req)
-- 'Control.Retry.recovering' customized for HTTP failures
-- Retries on any HttpException and on ResourceVanished IO errors
-- (e.g. connection reset by peer); other IO errors are not retried.
recoveringHttp :: (MonadMask m, MonadIO m)
               => RetryPolicy -> m a -> m a
recoveringHttp retryPolicy =
#if MIN_VERSION_retry(0,7,0)
    recovering retryPolicy handlers . const
#else
    recovering retryPolicy handlers
#endif
  where
    handlers = [const $ Handler alwaysRetryHttp,const $ Handler retrySomeIO]

    alwaysRetryHttp :: Monad m => HttpException -> m Bool
    alwaysRetryHttp _ = return True

    retrySomeIO :: Monad m => IOException -> m Bool
    retrySomeIO e = return $ case ioe_type e of
                               -- hGetBuf: resource vanished (Connection reset by peer)
                               ResourceVanished -> True
                               -- conservatively exclude all others
                               _ -> False
-- | Copied and extended version of Network.HTTP.Download.download.
--
-- Has the following additional features:
-- * Verifies that response content-length header (if present)
--   matches expected length
-- * Limits the download to (close to) the expected # of bytes
-- * Verifies that the expected # bytes were downloaded (not too few)
-- * Verifies md5 if response includes content-md5 header
-- * Verifies the expected hashes
--
-- Throws VerifiedDownloadException.
-- Throws IOExceptions related to file system operations.
-- Throws HttpException.
verifiedDownload :: (MonadIO m, MonadLogger m)
         => DownloadRequest
         -> Path Abs File -- ^ destination
         -> (Maybe Integer -> Sink ByteString IO ()) -- ^ custom hook to observe progress
         -> m Bool -- ^ Whether a download was performed
verifiedDownload DownloadRequest{..} destpath progressSink = do
    let req = drRequest
    whenM' (liftIO getShouldDownload) $ do
        $logDebug $ "Downloading " <> decodeUtf8With lenientDecode (path req)
        liftIO $ do
            createDirectoryIfMissing True dir
            -- Download into a .tmp sibling, then rename into place so a
            -- failed/interrupted download never leaves a partial file at fp.
            recoveringHttp drRetryPolicy $
                withBinaryFile fptmp WriteMode $ \h ->
                    httpSink req (go h)
            renameFile fptmp fp
  where
    -- Run the action (returning True) only when the predicate holds.
    whenM' mp m = do
        p <- mp
        if p then m >> return True else return False
    fp = toFilePath destpath
    fptmp = fp <.> "tmp"
    dir = toFilePath $ parent destpath
    getShouldDownload = do
        fileExists <- doesFileExist fp
        if fileExists
            -- only download if file does not match expectations
            then not <$> fileMatchesExpectations
            -- or if it doesn't exist yet
            else return True
    -- precondition: file exists
    -- TODO: add logging
    -- Either verification exception simply means "re-download".
    fileMatchesExpectations =
        ((checkExpectations >> return True)
          `catch` \(_ :: VerifyFileException) -> return False)
          `catch` \(_ :: VerifiedDownloadException) -> return False
    checkExpectations = bracket (openFile fp ReadMode) hClose $ \h -> do
        for_ drLengthCheck $ checkFileSizeExpectations h
        sourceHandle h $$ getZipSink (hashChecksToZipSink drRequest drHashChecks)
    -- doesn't move the handle
    checkFileSizeExpectations h expectedFileSize = do
        fileSizeInteger <- hFileSize h
        -- Guard the Integer->Int conversion before comparing.
        when (fileSizeInteger > toInteger (maxBound :: Int)) $
            throwM $ WrongFileSize expectedFileSize fileSizeInteger
        let fileSize = fromInteger fileSizeInteger
        when (fileSize /= expectedFileSize) $
            throwM $ WrongFileSize expectedFileSize fileSizeInteger
    checkContentLengthHeader headers expectedContentLength =
        case List.lookup hContentLength headers of
            Just lengthBS -> do
                let lengthStr = displayByteString lengthBS
                when (lengthStr /= show expectedContentLength) $
                    throwM $ WrongContentLength drRequest expectedContentLength lengthBS
            _ -> return ()
    -- Sink for the live response: checks headers, then streams the body
    -- simultaneously into the hash checks, the length check, the file
    -- handle, and the caller's progress sink (all zipped together).
    go h res = do
        let headers = getResponseHeaders res
            mcontentLength = do
              hLength <- List.lookup hContentLength headers
              (i,_) <- readInteger hLength
              return i
        for_ drLengthCheck $ checkContentLengthHeader headers
        -- A content-md5 header adds one more hash check (RFC 2616 14.15).
        let hashChecks = (case List.lookup hContentMD5 headers of
                Just md5BS ->
                    [ HashCheck
                          { hashCheckAlgorithm = MD5
                          , hashCheckHexDigest = CheckHexDigestHeader md5BS
                          }
                    ]
                Nothing -> []
                ) ++ drHashChecks
        maybe id (\len -> (CB.isolate len =$=)) drLengthCheck
            $ getZipSink
                ( hashChecksToZipSink drRequest hashChecks
                  *> maybe (pure ()) (assertLengthSink drRequest) drLengthCheck
                  *> ZipSink (sinkHandle h)
                  *> ZipSink (progressSink mcontentLength))
| mrkkrp/stack | src/Network/HTTP/Download/Verified.hs | bsd-3-clause | 12,238 | 0 | 20 | 3,265 | 2,406 | 1,281 | 1,125 | -1 | -1 |
{-# LANGUAGE LambdaCase, RankNTypes, EmptyCase,
StandaloneDeriving, FlexibleContexts,
DeriveDataTypeable, DeriveFoldable,
DeriveFunctor, DeriveTraversable,
ScopedTypeVariables, BangPatterns #-}
{-# LANGUAGE UndecidableInstances #-} -- for Streaming show instance
module Streaming.Internal where
import Control.Monad
import Control.Monad.Trans
import Control.Monad.Trans.Class
import Control.Applicative
import Data.Data ( Data, Typeable )
import Data.Foldable ( Foldable )
import Data.Traversable
import Control.Monad.Morph
import Data.Monoid
import Data.Functor.Identity
import GHC.Exts ( build )
import Prelude hiding (splitAt)
-- | A left-strict pair; the base functor for streams of individual elements.
-- The element (first component) is strict; the rest of the stream is lazy.
data Of a b = !a :> b
    deriving (Data, Eq, Foldable, Functor, Ord,
              Read, Show, Traversable, Typeable)
infixr 4 :>
-- | Curry a function of left-strict pairs.
kurry :: (Of a b -> c) -> a -> b -> c
kurry f a b = f (a :> b)
{-# INLINE kurry #-}

-- | Uncurry a function into a function on left-strict pairs.
unkurry :: (a -> b -> c) -> Of a b -> c
unkurry f (a :> b) = f a b
{-# INLINE unkurry #-}
-- | @Stream@ (\'FreeT\') data type. The constructors are exported by @Streaming.Internal@
-- Step wraps a strict functor layer; Delay suspends a monadic effect;
-- Return terminates the stream with a result.
data Stream f m r = Step !(f (Stream f m r))
                  | Delay (m (Stream f m r))
                  | Return r
                  deriving (Typeable)
deriving instance (Show r, Show (m (Stream f m r))
                  , Show (f (Stream f m r))) => Show (Stream f m r)
deriving instance (Eq r, Eq (m (Stream f m r))
                  , Eq (f (Stream f m r))) => Eq (Stream f m r)
deriving instance (Typeable f, Typeable m, Data r, Data (m (Stream f m r))
                  , Data (f (Stream f m r))) => Data (Stream f m r)
instance (Functor f, Monad m) => Functor (Stream f m) where
  -- Route through the church encoding so fmap can fuse with neighbours.
  fmap f = buildStream . fmap f . foldStream
  {-# INLINE fmap #-}
  -- loop = \case Step f -> Step (fmap loop f)
  --              Delay m -> Delay (liftM loop m)
  --              Return r -> Return (f r)
instance (Functor f, Monad m) => Monad (Stream f m) where
  return = Return
  {-# INLINE return #-}
  -- (>>) and (>>=) are implemented on the church encoding (Folding)
  -- rather than by direct recursion, so chains of binds can fuse.
  (>>) = \phi psi -> buildStream $ Folding (augmentFolding_ (getFolding (foldStream phi))
                    (getFolding (foldStream psi)))
    where
      -- Append two church-encoded streams, discarding the first result
      -- (note the ignored \x below).
      augmentFolding_ ::
           (forall r'. (f r' -> r') -> (m r' -> r') -> (s -> r') -> r')
        -> (forall r'. (f r' -> r') -> (m r' -> r') -> (r -> r') -> r')
        -> (forall r'. (f r' -> r') -> (m r' -> r') -> (r -> r') -> r')
      augmentFolding_ = \phi psi construct wrap done ->
            phi construct
                wrap
                (\x -> psi construct
                           wrap
                           done)
      {-# INLINE augmentFolding_ #-}
  {-# INLINE (>>) #-}
  s >>= f = buildStream (foldBind (foldStream . f) (foldStream s))
  {-# INLINE (>>=) #-}
  -- loop lst where
  -- loop = \case Step f -> Step (fmap loop f)
  --              Delay m -> Delay (liftM loop m)
  --              Return r -> f r
instance (Functor f, Monad m) => Applicative (Stream f m) where
  pure = buildStream . return
  {-# INLINE pure #-}
  -- Sequence x fully, then y, applying x's result to y's.
  x <*> y = buildStream $ Folding $ \construct wrap done ->
    getFolding (foldStream x)
       construct
       wrap
       (\f -> getFolding (foldStream y)
                 construct
                 wrap
                 (\s -> done (f s)) )
  {-# INLINE (<*>) #-}
instance Functor f => MonadTrans (Stream f) where
  lift = buildStream . lift
  {-# INLINE lift #-}
instance Functor f => MFunctor (Stream f) where
  -- Change the base monad with a natural transformation, via Folding.
  hoist trans = buildStream . hoist trans . foldStream
  {-# INLINE hoist #-}
  -- loop where
  --  loop = \case Step f -> Step (fmap loop f)
  --               Delay m -> Delay (trans (liftM loop m))
  --               Return r -> Return r
instance (MonadIO m, Functor f) => MonadIO (Stream f m) where
  liftIO = buildStream . liftIO
  {-# INLINE liftIO #-}
-- | Map streaming layers of one functor to another with a natural transformation
maps :: (Monad m, Functor f) => (forall x . f x -> g x) -> Stream f m r -> Stream g m r
maps phi = buildStream . mapsF phi . foldStream
{-# INLINE maps #-}

-- | Like 'maps', but the layer transformation may perform effects.
mapsM :: (Monad m, Functor f) => (forall x . f x -> m (g x)) -> Stream f m r -> Stream g m r
mapsM phi = buildStream . mapsMF phi . foldStream
{-# INLINE mapsM #-}

-- | Crush each layer to an effectful (element, rest) pair, producing a
-- stream of individual elements.
maps' :: (Monad m, Functor f)
      => (forall x . f x -> m (a, x))
      -> Stream f m r
      -> Stream (Of a) m r
maps' phi = loop where
  loop stream = case stream of
    Return r -> Return r
    Delay m -> Delay $ liftM loop m
    -- The effect that extracts the pair is suspended with Delay.
    Step fs -> Delay $ liftM (Step . uncurry (:>)) (phi (fmap loop fs))
{-# INLINABLE maps' #-}
-- church encodings:
-- ----- unwrapped synonym:
-- A stream represented as its own fold: callers supply handlers for
-- Step (construct), Delay (wrap) and Return (done).
type Folding_ f m r = forall r'
                      . (f r' -> r')
                     -> (m r' -> r')
                     -> (r -> r')
                     -> r'
-- ------ wrapped:
newtype Folding f m r = Folding {getFolding :: Folding_ f m r }
-- these should perhaps be expressed with
-- predefined combinators for Folding_
instance Functor (Folding f m) where
  fmap f phi = Folding (\construct wrap done ->
    getFolding phi construct
                   wrap
                   (done . f))
instance Monad (Folding f m) where
  return r = Folding (\construct wrap done -> done r)
  (>>=) = flip foldBind
  {-# INLINE (>>=) #-}
-- Bind on church-encoded streams: continue the first stream's 'done'
-- continuation with the fold of the second.
foldBind f phi = Folding (\construct wrap done ->
  getFolding phi construct
                 wrap
                 (\a -> getFolding (f a) construct
                                         wrap
                                         done))
{-# INLINE foldBind #-}
instance Applicative (Folding f m) where
  pure r = Folding (\construct wrap done -> done r)
  phi <*> psi = Folding (\construct wrap done ->
    getFolding phi construct
                   wrap
                   (\f -> getFolding psi construct
                                         wrap
                                         (\a -> done (f a))))
instance MonadTrans (Folding f) where
  lift ma = Folding (\constr wrap done -> wrap (liftM done ma))
  {-# INLINE lift #-}
instance Functor f => MFunctor (Folding f) where
  hoist trans phi = Folding (\construct wrap done ->
    getFolding phi construct (wrap . trans) done)
  {-# INLINE hoist #-}
instance (MonadIO m, Functor f) => MonadIO (Folding f m) where
  liftIO io = Folding (\construct wrap done ->
    wrap (liftM done (liftIO io))
    )
  {-# INLINE liftIO #-}
-- | 'maps' on the church encoding: precompose the Step handler.
mapsF :: (forall x . f x -> g x) -> Folding f m r -> Folding g m r
mapsF morph (Folding phi) = Folding $ \construct wrap done ->
  phi (construct . morph)
      wrap
      done
{-# INLINE mapsF #-}
-- | 'mapsM' on the church encoding: the effectful morph is run via wrap.
mapsMF :: (Monad m) => (forall x . f x -> m (g x)) -> Folding f m r -> Folding g m r
mapsMF morph (Folding phi) = Folding $ \construct wrap done ->
  phi (wrap . liftM construct . morph)
      wrap
      done
{-# INLINE mapsMF #-}
-- | Specialisation of 'mapsMF' producing element streams ('Of' layers).
mapsFoldF :: (Monad m)
          => (forall x . f x -> m (a, x))
          -> Folding f m r
          -> Folding (Of a) m r
mapsFoldF crush = mapsMF (liftM (\(a,b) -> a :> b) . crush)
{-# INLINE mapsFoldF #-}
-- -------------------------------------
-- optimization operations: wrapped case
-- -------------------------------------
--
-- `foldStream` is a flipped and wrapped variant of Atkey's
--    effectfulFolding :: (Functor f, Monad m) =>
--    (m x -> x) -> (r -> x) -> (f x -> x) -> Stream f m r -> x
-- modulo the 'Return' constructor, which implicitly restricts the
-- available class of Functors.
-- See http://bentnib.org/posts/2012-01-06-streams.html and
-- the (nightmarish) associated paper.
-- Our plan is thus where possible to replace the datatype Stream with
-- the associated effectfulFolding itself, wrapped as Folding

-- | Convert a concrete Stream to its church encoding.
foldStream :: (Functor f, Monad m) => Stream f m t -> Folding f m t
foldStream lst = Folding (destroy lst)
{-# INLINE[0] foldStream #-}

-- | Reify a church-encoded stream back to the Stream datatype.
buildStream :: Folding f m r -> Stream f m r
buildStream (Folding phi) = phi Step Delay Return
{-# INLINE[0] buildStream #-}

-- The compiler has no difficulty with the rule for the wrapped case.
-- I have not investigated whether the remaining newtype
-- constructor is acting as an impediment. The stage [0] or [1]
-- seems irrelevant in either case.

{-# RULES
  "foldStream/buildStream" forall phi.
    foldStream (buildStream phi) = phi
  #-}
-- -------------------------------------
-- optimization operations: unwrapped case
-- -------------------------------------

-- | Fold a Stream with explicit handlers for each constructor.
destroy
  :: (Functor f, Monad m) =>
     Stream f m r -> (f b -> b) -> (m b -> b) -> (r -> b) -> b
destroy = \lst construct wrap done ->
  let loop = \case Delay mlst -> wrap (liftM loop mlst)
                   Step flst -> construct (fmap loop flst)
                   Return r -> done r
  in loop lst
{-# INLINABLE destroy #-}

-- | Build a Stream from a rank-2 fold (inverse of 'destroy').
construct
  :: (forall b . (f b -> b) -> (m b -> b) -> (r -> b) -> b) -> Stream f m r
construct = \phi -> phi Step Delay Return
{-# INLINE construct #-}

-- Same as 'destroy'/'construct' but staged for the rewrite rule below.
foldStreamx
  :: (Functor f, Monad m) =>
     Stream f m t -> (f b -> b) -> (m b -> b) -> (t -> b) -> b
foldStreamx = \lst construct wrap done ->
  let loop = \case Delay mlst -> wrap (liftM loop mlst)
                   Step flst -> construct (fmap loop flst)
                   Return r -> done r
  in loop lst
{-# INLINE[1] foldStreamx #-}

-- NB: no type signature; inferred from the constructors applied.
buildStreamx = \phi -> phi Step Delay Return
{-# INLINE[1] buildStreamx #-}

-- The compiler seems to have trouble seeing these rules as applicable,
-- unlike those for foldStream & buildStream. Opaque arity is
-- a plausible hypothesis when you know nothing yet.
-- When additional arguments are given to a rule,
-- the most saturated is the one that fires,
-- but it only fires where this one would.

{-# RULES
  "foldStreamx/buildStreamx" forall phi.
    foldStreamx (buildStreamx phi) = phi
  #-}
-- | Realise a pure church-encoded element stream as an ordinary list.
buildList_ :: Folding_ (Of a) Identity () -> [a]
buildList_ phi = phi (\(a :> as) -> a : as)
                     (\(Identity xs) -> xs)
                     (\() -> [])
{-# INLINE buildList_ #-}

-- | Realise an effectful church-encoded element stream as @m [a]@.
buildListM_ :: Monad m => Folding_ (Of a) m () -> m [a]
buildListM_ phi = phi (\(a :> mas) -> liftM (a :) mas)
                      (>>= id)
                      (\() -> return [])
{-# INLINE buildListM_ #-}

-- | View a list as a church-encoded element stream (no effects used).
foldList_ :: Monad m => [a] -> Folding_ (Of a) m ()
foldList_ xs = \construct wrap done ->
  foldr (\x r -> construct (x:>r)) (done ()) xs
{-# INLINE foldList_ #-}

buildList :: Folding (Of a) Identity () -> [a]
buildList = \(Folding phi) -> buildList_ phi
{-# INLINE[0] buildList #-}

foldList :: Monad m => [a] -> Folding (Of a) m ()
foldList = \xs -> Folding (foldList_ xs)
{-# INLINE[0] foldList #-}

{-# RULES
  "foldr/buildList" forall phi op seed .
    foldr op seed (buildList phi) =
      getFolding phi (unkurry op) runIdentity (\() -> seed)
  #-}
{-# RULES
  "foldr/buildList_" forall (phi :: Folding_ (Of a) Identity ())
                            (op :: a -> b -> b)
                            (seed :: b) .
    foldr op seed (buildList_ phi) =
      phi (unkurry op) runIdentity (\() -> seed)
  #-}
{-# RULES
  "foldList/buildList" forall phi.
    foldList(buildList phi) = phi
  #-}
--
-- | Run each layer of the stream, inserting @sep@ between layers
-- (after the first layer, before each subsequent one).
intercalates :: (Monad m, Monad (t m), MonadTrans t) =>
     t m a -> Stream (t m) m b -> t m b
intercalates sep = go0
  where
    -- go0: before any layer has run, so no separator yet.
    go0 f = case f of
      Return r -> return r
      Delay m -> lift m >>= go0
      Step fstr -> do
        f' <- fstr
        go1 f'
    -- go1: at least one layer has run; prefix each new layer with sep.
    go1 f = case f of
      Return r -> return r
      Delay m -> lift m >>= go1
      Step fstr -> do
        sep
        f' <- fstr
        go1 f'
{-# INLINABLE intercalates #-}

-- Church-encoded variant.
-- NOTE(review): this runs @sep@ after *every* layer, including the
-- last — unlike 'intercalates', which never separates after the final
-- layer. Confirm whether the difference is intended.
intercalates' :: (Monad m, Monad (t m), MonadTrans t) =>
     t m a -> Stream (t m) m b -> t m b
intercalates' sep s = getFolding (foldStream s)
  (\tmstr -> do
      str <- tmstr
      sep
      str
      )
  (join . lift)
  return
{-# INLINE intercalates' #-}
-- | Tear down a stream into a transformer over its base monad, given a
-- handler for each functor layer.
iterTM ::
  (Functor f, Monad m, MonadTrans t,
   Monad (t m)) =>
  (f (t m a) -> t m a) -> Stream f m a -> t m a
iterTM out str = getFolding (foldStream str) out (join . lift) return
{-# INLINE iterTM #-}

-- | Like 'iterTM' but collapsing directly into the base monad.
iterT ::
  (Functor f, Monad m) => (f (m a) -> m a) -> Stream f m a -> m a
iterT out str = getFolding (foldStream str) out join return
{-# INLINE iterT #-}

-- | Run every transformer layer of the stream in sequence.
concats ::
  (MonadTrans t, Monad (t m), Monad m) =>
  Stream (t m) m a -> t m a
concats str = getFolding (foldStream str) join (join . lift) return
{-# INLINE concats #-}
-- | Split a stream after a number of layers, returning the remainder
-- as the result of the prefix stream.
--
-- NOTE(review): the guard @n <= 1@ stops *before* consuming a layer, so
-- for n >= 1 this keeps only n-1 layers, and the inner @case n of 0@
-- branch below is unreachable (the guard already caught n <= 1).
-- 'chunksOf' below compensates by emitting the first layer of each
-- chunk itself, so the pair is internally consistent — confirm before
-- "fixing" either in isolation.
splitAt :: (Monad m, Functor f) => Int -> Stream f m r -> Stream f m (Stream f m r)
splitAt = loop where
  loop !n stream
    | n <= 1 = Return stream
    | otherwise = case stream of
        Return r -> Return (Return r)
        Delay m -> Delay (liftM (loop n) m)
        Step fs -> case n of
          0 -> Return (Step fs)
          _ -> Step (fmap (loop (n-1)) fs)
{-# INLINABLE splitAt #-}

-- | Group the stream's layers into chunks of @n0@.
-- Each chunk is the first layer (wrapped here directly: note the double
-- Step) followed by what 'splitAt' n0 keeps of the continuation.
chunksOf :: (Monad m, Functor f) => Int -> Stream f m r -> Stream (Stream f m) m r
chunksOf n0 = loop where
  loop stream = case stream of
    Return r -> Return r
    Delay m -> Delay (liftM loop m)
    Step fs -> Step $ Step $ fmap (fmap loop . splitAt n0) fs
{-# INLINABLE chunksOf #-}
| haskell-streaming/streaming | benchmarks/old/Stream/Internal.hs | bsd-3-clause | 13,498 | 0 | 20 | 4,276 | 4,217 | 2,184 | 2,033 | 285 | 5 |
{-# LANGUAGE OverloadedLabels #-}
module PatFail009 where
f #a = _
| sdiehl/ghc | testsuite/tests/parser/should_fail/patFail009.hs | bsd-3-clause | 68 | 1 | 5 | 12 | 13 | 8 | 5 | -1 | -1 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE TemplateHaskell #-}
module Ros.Actionlib_msgs.GoalStatus where
import qualified Prelude as P
import Prelude ((.), (+), (*))
import qualified Data.Typeable as T
import Control.Applicative
import Ros.Internal.RosBinary
import Ros.Internal.Msg.MsgInfo
import qualified GHC.Generics as G
import qualified Data.Default.Generics as D
import qualified Data.Word as Word
import qualified Ros.Actionlib_msgs.GoalID as GoalID
import Lens.Family.TH (makeLenses)
import Lens.Family (view, set)
-- | Mirrors the ROS @actionlib_msgs/GoalStatus@ message (see
-- 'msgTypeName' below): a goal identifier, a numeric status code
-- (interpreted via the Word8 constants later in this module), and
-- free-form status text.
data GoalStatus = GoalStatus { _goal_id :: GoalID.GoalID
                             , _status :: Word.Word8
                             , _text :: P.String
                             } deriving (P.Show, P.Eq, P.Ord, T.Typeable, G.Generic)
-- Generate lenses (goal_id, status, text) for the record fields.
$(makeLenses ''GoalStatus)
-- Wire (de)serialisation: fields are put/got in declaration order.
instance RosBinary GoalStatus where
  put obj' = put (_goal_id obj') *> put (_status obj') *> put (_text obj')
  get = GoalStatus <$> get <*> get <*> get
-- ROS message metadata; sourceMD5 is the checksum of the .msg definition.
instance MsgInfo GoalStatus where
  sourceMD5 _ = "d388f9b87b3c471f784434d671988d4a"
  msgTypeName _ = "actionlib_msgs/GoalStatus"
instance D.Default GoalStatus
-- Numeric goal states.  Descriptions follow the ROS
-- actionlib_msgs/GoalStatus specification -- confirm against the
-- upstream .msg file if semantics matter.
pending :: Word.Word8
pending = 0    -- goal not yet processed by the action server
active :: Word.Word8
active = 1     -- goal currently being processed
preempted :: Word.Word8
preempted = 2  -- goal cancelled after it started executing
succeeded :: Word.Word8
succeeded = 3  -- goal achieved successfully
aborted :: Word.Word8
aborted = 4    -- goal aborted during execution
rejected :: Word.Word8
rejected = 5   -- goal rejected without being processed
preempting :: Word.Word8
preempting = 6 -- cancel request received, preemption not yet complete
recalling :: Word.Word8
recalling = 7  -- cancel request received before execution began
recalled :: Word.Word8
recalled = 8   -- goal cancelled before it started executing
lost :: Word.Word8
lost = 9       -- client-side state: the goal was lost
| acowley/roshask | msgs/Actionlib_msgs/Ros/Actionlib_msgs/GoalStatus.hs | bsd-3-clause | 1,553 | 1 | 10 | 305 | 420 | 249 | 171 | 49 | 1 |
{-# LANGUAGE ForeignFunctionInterface, CPP #-}
import Foreign.C
foreign import ccall unsafe "test" test :: CInt -> IO ()
main :: IO ()
-- Use conditional language to test passing a file with a filename
-- starting with a hyphen to the preprocessor.
#if defined(__GLASGOW_HASKELL__)
main = test 3
#endif
| sdiehl/ghc | testsuite/tests/driver/T12674/-T12674.hs | bsd-3-clause | 304 | 0 | 8 | 50 | 52 | 29 | 23 | 4 | 0 |
module Distribution.Server.Framework.Logging (
Verbosity,
lognotice,
loginfo,
logdebug,
logTiming,
) where
import Distribution.Verbosity
import System.IO
import qualified Data.ByteString.Char8 as BS -- No UTF8 in log messages
import System.Environment
import Control.Monad (when)
import Data.Time.Clock (getCurrentTime, diffUTCTime)
lognotice :: Verbosity -> String -> IO ()
-- | Notice-level logging: prints to stdout, prefixed with the program
-- name.  Suppressed when verbosity is below 'normal'.
lognotice v msg = when (v >= normal) $ do
    name <- getProgName
    let line = BS.pack (name ++ ": " ++ msg)
    BS.hPutStrLn stdout line
    hFlush stdout
loginfo :: Verbosity -> String -> IO ()
-- | Informational logging to stderr; shown at 'verbose' and above.
loginfo v msg =
    when (v >= verbose) $
        BS.hPutStrLn stderr (BS.pack msg) >> hFlush stderr
logdebug :: Verbosity -> String -> IO ()
-- | Debug logging to stderr; only shown at the 'deafening' level.
logdebug v msg =
    when (v >= deafening) $
        BS.hPutStrLn stderr (BS.pack msg) >> hFlush stderr
logTiming :: Verbosity -> String -> IO a -> IO a
-- | Run @action@ and log (via 'loginfo') how long it took.
logTiming v msg action = do
  start  <- getCurrentTime
  result <- action
  finish <- getCurrentTime
  loginfo v (msg ++ ". time: " ++ show (diffUTCTime finish start))
  return result
| ocharles/hackage-server | Distribution/Server/Framework/Logging.hs | bsd-3-clause | 1,117 | 0 | 13 | 241 | 381 | 193 | 188 | 35 | 1 |
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE OverloadedStrings #-}
import Network.Wai.Application.Static
import Network.Wai.Handler.Warp (run)
import Data.FileEmbed
import WaiAppStatic.Types
import WaiAppStatic.Storage.Embedded
main :: IO ()
-- | Serve the contents of the @test@ directory, embedded into the binary
-- at compile time via Template Haskell, on port 3000.  The settings
-- update disables directory index files and client-side caching.
main = run 3000 $ staticApp (embeddedSettings $(embedDir "test"))
    { ssIndices = []
    , ssMaxAge = NoMaxAge
    }
| jberryman/wai | wai-app-static/embedded-sample.hs | mit | 363 | 0 | 12 | 54 | 92 | 53 | 39 | 11 | 1 |
{-# LANGUAGE FlexibleInstances #-}
-- This just tests what the kind error message looks like
-- Trac #1633
module T1633 where
instance Functor Bool
| ezyang/ghc | testsuite/tests/typecheck/should_fail/T1633.hs | bsd-3-clause | 151 | 0 | 5 | 27 | 14 | 9 | 5 | 3 | 0 |
-- Explicit export list
-- Produces error
-- > ghc-stage2: panic! (the 'impossible' happened)
-- > (GHC version 7.7.20130109 for x86_64-unknown-linux):
-- > nameModule solveV{v r3Ep}
-- It is something about internal vs external names.
{-# LANGUAGE ParallelArrays, ParallelListComp #-}
{-# OPTIONS -fvectorise #-}
module ExportList (solvePA) where
import Data.Array.Parallel hiding ((+), (-), (*), (/))
import Data.Array.Parallel.PArray
import Data.Array.Parallel.Prelude.Bool as B
import Data.Array.Parallel.Prelude.Double as D
import qualified Data.Array.Parallel.Prelude.Int as I
import qualified Data.Vector as V
import qualified Prelude as P
data NodeV = NodeV Double Double Double [:NodeV:]
{-# NOINLINE solvePA #-}
solvePA
:: NodeV -- ^ nodes
-> Double -- ^ time
-> PArray Double
solvePA nodes t = toPArrayP (solveV t)
solveV :: Double -> [:Double:]
solveV t
= concatP (mapP solveV [: :])
| ghc-android/ghc | testsuite/tests/dph/modules/ExportList.hs | bsd-3-clause | 988 | 10 | 9 | 218 | 197 | 129 | 68 | 20 | 1 |
-- !!! string gaps
-- !!!
module Main(main) where
-----------
main = putStr "\
\Some girls give me money\n\
\Some girls buy me clothes\n\
\..."
-----------
main2 = putStr "\
\ \
..."
-----------
main3 = putStr "\
\Some girls give me money\n\
-- and here is a comment
\Some girls buy me clothes\n\
\..."
-----------
main3 = putStr "\
{-
and here is a nested {- comment -}
-}
\Some girls give me money\n\
\Some girls buy me clothes\n\
\..."
| forked-upstream-packages-for-ghcjs/ghc | testsuite/tests/parser/should_fail/readFail004.hs | bsd-3-clause | 458 | 15 | 6 | 100 | 105 | 65 | 40 | -1 | -1 |
module System.Apotiki.Tar (getControl, getStrictControl) where
import Data.List
import qualified Data.Map as M
import qualified Codec.Archive.Tar as Tar
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as B
import qualified Data.ByteString.Lazy.Char8 as BC
import qualified Codec.Compression.GZip as Z
tarEntryList :: Tar.Entries Tar.FormatError -> [Tar.Entry] -> [Tar.Entry]
-- | Accumulate all entries of a tar archive into a list (reverse
-- archive order, since each entry is consed onto the accumulator).
-- A malformed archive aborts with 'error'.
tarEntryList entries acc = case entries of
  Tar.Next e rest -> tarEntryList rest (e : acc)
  Tar.Done        -> acc
  Tar.Fail err    -> error (show err)
-- | Extract the textual payload of a normal-file tar entry.
--
-- Previously this was a partial function: any non-file entry (directory,
-- symlink, ...) died with an opaque pattern-match failure.  It still
-- aborts on such entries, but now with an explicit, attributable message.
tarEntryPayload :: Tar.EntryContent -> String
tarEntryPayload (Tar.NormalFile payload _size) = BC.unpack payload
tarEntryPayload _ =
  error "System.Apotiki.Tar.tarEntryPayload: entry is not a normal file"
getStrictControl :: BS.ByteString -> String
-- | Like 'getControl' but for a strict ByteString: wrap it as a
-- single-chunk lazy ByteString first.
getStrictControl bytes = getControl (B.fromChunks [bytes])
getControl :: B.ByteString -> String
-- | Pull the Debian @./control@ file out of a gzipped tar archive and
-- return its contents.  Aborts with the list of entry paths when no
-- @./control@ entry exists.
getControl content = tarEntryPayload (Tar.entryContent controlEntry)
  where
    unzipped = Z.decompress content
    entries  = tarEntryList (Tar.read unzipped) []
    controlEntry =
      case find ((== "./control") . Tar.entryPath) entries of
        Just e  -> e
        Nothing -> error (show (map Tar.entryPath entries))
| pyr/apotiki | System/Apotiki/Tar.hs | isc | 1,169 | 0 | 14 | 207 | 360 | 199 | 161 | 27 | 3 |
{- |
Haskell functions for consuming the API.
-}
module Factory.Haskell where
import Servant
import qualified Control.Monad.Trans.Except as Except
import qualified Factory.API as API
import qualified Factory.Types.Widget as Widget
import qualified Servant.Client as Servant
import qualified Network.HTTP.Client as HTTP
{- |
Run an action. If it's successful, return the value. If it fails, 'error'
with the message.
-}
run :: Action a -> IO a
-- | Execute an 'Action'; a 'Left' result aborts with the shown error.
run action =
  Except.runExceptT action >>= either (error . show) return
{- |
A convenient type alias for API consumers. This looks the same as
'Factory.Server.Action', but the 'Servant.ServantErr' comes from
@Servant.Client@ instead of @Servant@.
-}
type Action a = Except.ExceptT Servant.ServantError IO a
{- |
Get all of the widgets. See 'API.ListWidgets'.
-}
listWidgets :: HTTP.Manager -> Servant.BaseUrl -> Action [Widget.Widget]
{- |
Create a new widget. See 'API.CreateWidget'.
-}
createWidget :: Widget.Widget -> HTTP.Manager -> Servant.BaseUrl -> Action Widget.Widget
{- |
Try to get a particular widget. See 'API.ShowWidget'.
-}
showWidget :: Int -> HTTP.Manager -> Servant.BaseUrl -> Action Widget.Widget
{- |
Update an existing widget. See 'API.UpdateWidget'.
-}
updateWidget :: Int -> Widget.Widget -> HTTP.Manager -> Servant.BaseUrl -> Action Widget.Widget
{- |
Destroy an existing widget. See 'API.DestroyWidget'.
-}
destroyWidget :: Int -> HTTP.Manager -> Servant.BaseUrl -> Action Widget.Widget
( listWidgets
:<|> createWidget
:<|> showWidget
:<|> updateWidget
:<|> destroyWidget
) = Servant.client API.documentedAPI
{- |
The default server location.
-}
host :: Servant.BaseUrl
host = Servant.BaseUrl Servant.Http "localhost" 8080 ""
| alexanderkjeldaas/factory | library/Factory/Haskell.hs | mit | 1,850 | 0 | 12 | 354 | 346 | 189 | 157 | 27 | 2 |
{-# LANGUAGE OverloadedStrings, DeriveDataTypeable #-}
module Castor.Externs ( Exports(..)
, mkBif
, mkImport'
, importCompiled
, importScript
) where
import Control.Applicative
import Control.Monad.Error
import Data.Char
import Data.IORef
import Data.List
import qualified Data.Map as M
import Data.Monoid
import qualified Data.Text as T
import qualified Data.Traversable as T
import Data.Maybe
import Data.Typeable
import System.Directory
import System.FilePath
import qualified System.Plugins.Load as P
import System.IO
import Text.Parsec
import Castor.Types
import Castor.Parser
import {-# SOURCE #-} Castor.Interpreter (eval, mkMinimalRef)
newtype Exports = Exports { runExports :: M.Map Sym Value }
deriving (Typeable)
-- Make a new built-in function.
-- Make a new built-in function: wrap the raw procedure in one
-- 'Function' layer per declared parameter name, innermost last.
mkBif :: [Sym] -> Proc -> Value
mkBif syms f = foldr wrap (Procedure f) syms
  where
    wrap sym inner = Function sym inner Nothing
importCompiled :: FilePath -> Result
-- ^ Load a compiled plugin object file: dynamically resolve its
-- @exports@ symbol and evaluate each exported value in a minimal
-- environment rooted at the plugin's directory.
importCompiled fp = do
  mv <- liftIO $ P.load fp [] [] "exports"
  case mv of
    P.LoadFailure msg -> throwError $ fromString $ show msg
    P.LoadSuccess _ v -> do
      minimal <- mkMinimalRef $ takeDirectory fp
      Aggregate <$> T.mapM (\value -> eval value minimal minimal) v
importScript :: FilePath -> Result
-- ^ Parse and evaluate a source script, returning whatever it bound to
-- @exports@ (or an empty aggregate if it bound nothing).
importScript fp = do
  code <- liftIO $ readFile fp
  case parse program fp $ T.pack $ code of
    Left err -> throwError $ fromString $ show err
    Right v -> do
      -- Evaluate every top-level form for its bindings/effects in a
      -- minimal environment rooted at the script's directory.
      minimal <- mkMinimalRef $ takeDirectory fp
      T.mapM (\value -> eval value minimal minimal) v
      -- Scripts publish their public values by binding "exports".
      maybeExports <- liftIO $ elookup (Sym "exports") minimal
      return $ case maybeExports of
        Just exports -> exports
        Nothing -> Aggregate M.empty
mkImport' :: FilePath -> Value
-- ^ Build the builtin import function: resolves its @path@ argument
-- relative to @fp@, then loads either a compiled object (".o" suffix,
-- via 'importCompiled') or a source script (via 'importScript').
-- A missing file surfaces as the quoted symbol @no-such-file@.
mkImport' fp = mkBif [Sym "path"] $ \envref _ -> do
  x <- extract (Sym "path") envref
  let path = combine fp $ toString x
  fex <- liftIO $ doesFileExist path
  if not fex then throwError $ Quoted $ Symbol $ Sym "no-such-file" else do
    let importf = if ".o" `isSuffixOf` path then importCompiled
                  else importScript
    importf path
| rfw/castor | src/Castor/Externs.hs | mit | 2,317 | 0 | 17 | 664 | 680 | 352 | 328 | 59 | 3 |
{-# LANGUAGE BangPatterns #-}
{-|
Copyright (c) 2014 Maciej Bendkowski
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-}
module Memo where
{-|
An infinite binary tree data structure
with additional functor capabilities. Used
as a mean for the open recursion memorization pattern.
-}
{-|
  An infinite binary tree with a 'Functor' instance, used to memoize
  functions over the naturals via the open-recursion pattern.
-}
data Tree a = Tree (Tree a) a (Tree a)
instance Functor Tree where
    fmap f (Tree l x r) = Tree (fmap f l) (f x) (fmap f r)
{-|
  Look up the value stored at position @k@ (0-based, heap ordering:
  the two children of index k are at 2k+1 and 2k+2).
-}
idx :: Tree a -> Int -> a
idx (Tree _ x _) 0 = x
idx (Tree l _ r) k
    | branch == 0 = idx l k'
    | otherwise   = idx r k'
  where
    (k', branch) = (k-1) `divMod` 2
{-|
  The natural numbers laid out so that @idx nats k == k@.
-}
nats :: Tree Int
nats = build 0 1
  where
    build :: Int -> Int -> Tree Int
    -- seq replicates the original strictness on both arguments.
    build k s = k `seq` s `seq` Tree (build left step) k (build right step)
      where
        left  = k + s
        right = left + s
        step  = s * 2
{-|
  Enumerate the tree's values in index order (an infinite list).
-}
toList :: Tree a -> [a]
toList t = [idx t i | i <- [0..]]
{-# LANGUAGE CPP #-}
module GHCJS.DOM.HTMLEmbedElement (
#if (defined(ghcjs_HOST_OS) && defined(USE_JAVASCRIPTFFI)) || !defined(USE_WEBKIT)
module GHCJS.DOM.JSFFI.Generated.HTMLEmbedElement
#else
module Graphics.UI.Gtk.WebKit.DOM.HTMLEmbedElement
#endif
) where
#if (defined(ghcjs_HOST_OS) && defined(USE_JAVASCRIPTFFI)) || !defined(USE_WEBKIT)
import GHCJS.DOM.JSFFI.Generated.HTMLEmbedElement
#else
import Graphics.UI.Gtk.WebKit.DOM.HTMLEmbedElement
#endif
| plow-technologies/ghcjs-dom | src/GHCJS/DOM/HTMLEmbedElement.hs | mit | 465 | 0 | 5 | 39 | 33 | 26 | 7 | 4 | 0 |
{-# LANGUAGE NoImplicitPrelude #-}
module Goolosh.SDL.Init where
import System.IO
import Data.String(IsString(..),String)
import Control.Monad(Monad(..))
import qualified SDL
import Goolosh.SDL.State
initSDL :: String -> IO SDLState
-- ^ Initialise SDL (timer, video, events subsystems), create a window
-- with the given title and a renderer for it, and package both into an
-- 'SDLState'.
initSDL title = do
    SDL.initialize
        [ SDL.InitTimer
        , SDL.InitVideo
        , SDL.InitEvents
        ]
    -- Default window/renderer configuration; only the title is custom.
    let windowTitle = fromString title
    let windowConfig = SDL.defaultWindow
    let renderConfig = SDL.defaultRenderer
    -- Create the window first, then a renderer bound to it; (-1) lets
    -- SDL pick the first driver satisfying the requested flags.
    window <- SDL.createWindow windowTitle windowConfig
    renderer <- SDL.createRenderer window (-1) renderConfig
    let ret = SDLState
            { sdlWindow = window
            , sdlRenderer = renderer
            }
    return ret
-- | LeviSchuck/Goolosh | src/Goolosh/SDL/Init.hs | mit | 737 | 0 | 11 | 191 | 198 | 107 | 91 | 22 | 1 |
-- | Data.TSTP.Theory module.
-- Adapted from https://github.com/agomezl/tstp2agda.
module Data.TSTP.Theory
( Theory
( AC
, Equality
)
) where
------------------------------------------------------------------------------
-- | Background theories a TSTP inference record may refer to:
-- associativity-commutativity ('AC') or 'Equality'.
data Theory = AC | Equality
  deriving (Eq, Ord, Read, Show)
| jonaprieto/athena | src/Data/TSTP/Theory.hs | mit | 312 | 0 | 6 | 62 | 50 | 32 | 18 | 8 | 0 |
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module IHaskell.Display.Widgets.Selection.SelectionRangeSlider
( -- * The SelectionRangeSlider Widget
SelectionRangeSlider
-- * Constructor
, mkSelectionRangeSlider
) where
-- To keep `cabal repl` happy when running from the ihaskell repo
import Prelude
import Control.Monad (void)
import Data.Aeson
import Data.IORef (newIORef)
import qualified Data.Scientific as Sci
import qualified Data.Vector as V
import Data.Vinyl (Rec(..), (<+>), rput)
import IHaskell.Display
import IHaskell.Eval.Widgets
import IHaskell.IPython.Message.UUID as U
import IHaskell.Display.Widgets.Types
import IHaskell.Display.Widgets.Common
import IHaskell.Display.Widgets.Layout.LayoutWidget
import IHaskell.Display.Widgets.Style.DescriptionStyle
-- | A 'SelectionRangeSlider' represents a SelectionSlider widget from IPyhon.widgets
type SelectionRangeSlider = IPythonWidget 'SelectionRangeSliderType
-- | Create a new SelectionRangeSlider widget
mkSelectionRangeSlider :: IO SelectionRangeSlider
mkSelectionRangeSlider = do
wid <- U.random
layout <- mkLayout
dstyle <- mkDescriptionStyle
let selectionAttrs = defaultMultipleSelectionWidget "SelectionRangeSliderView" "SelectionRangeSliderModel" layout $ StyleWidget dstyle
selectionRangeSliderAttrs = (Orientation =:: HorizontalOrientation)
:& (ReadOut =:: True)
:& (ContinuousUpdate =:: True)
:& RNil
widgetState = WidgetState $ rput (Indices =:. ([0,0], rangeSliderVerification)) $ selectionAttrs <+> selectionRangeSliderAttrs
stateIO <- newIORef widgetState
let widget = IPythonWidget wid stateIO
-- Open a comm for this widget and store it in the kernel state
widgetSendOpen widget $ toJSON widgetState
-- Return the created widget
return widget
instance IHaskellWidget SelectionRangeSlider where
  getCommUUID = uuid
  -- Handle frontend comm messages: when the selected index pair changes,
  -- update the widget's Indices field and fire the selection handler.
  comm widget val _ =
    case nestedObjectLookup val ["state", "index"] of
      Just (Array indices) -> do
        -- NOTE(review): the lambda is partial -- it assumes every array
        -- element is a JSON Number (integral index). Confirm the
        -- frontend can never send anything else here.
        let indicesList = map (\(Number x) -> Sci.coefficient x) $ V.toList indices
        void $ setField' widget Indices indicesList
        triggerSelection widget
      -- Any other message shape is silently ignored.
      _ -> pure ()
_ -> pure () | gibiansky/IHaskell | ihaskell-display/ihaskell-widgets/src/IHaskell/Display/Widgets/Selection/SelectionRangeSlider.hs | mit | 2,499 | 0 | 19 | 573 | 461 | 257 | 204 | 48 | 1 |
module IsbnVerifier (isbn) where
import Data.Char (isDigit, digitToInt)
-- | Validate an ISBN-10: after stripping dashes there must be exactly
-- ten characters, the first nine must be digits, the last may be a digit
-- or X/x (worth 10), and the weighted sum (weights 10 down to 1) must be
-- divisible by 11.
isbn :: String -> Bool
isbn code =
    length stripped == 10
        && length digits == 9
        && checkVal >= 0
        && total `mod` 11 == 0
  where
    stripped = filter (/= '-') code
    -- Digits of everything except the final (check) character.
    digits   = [digitToInt c | c <- init code, isDigit c]
    checkVal = check (last code)
    total    = checkVal + sum (zipWith (*) [10,9..] digits)

-- | Value of the check character: X/x is 10, a digit is itself,
-- anything else is the sentinel -1 (invalid).
check :: Char -> Int
check c
    | c == 'x' || c == 'X' = 10
    | isDigit c            = digitToInt c
    | otherwise            = -1
| c19/Exercism-Haskell | isbn-verifier/src/IsbnVerifier.hs | mit | 422 | 0 | 16 | 99 | 212 | 110 | 102 | 11 | 1 |
module Main where
import Control.Applicative (Applicative((<*>), pure))
import Control.Monad (ap)
import Data.Ratio ((%))
import System.Exit (exitFailure, exitSuccess)
import System.IO (hPutStrLn, stderr)
import System.Timeout (timeout)
import Text.Printf (printf)
import qualified Text.ParserCombinators.ReadP as P
import Data.SciRatio
import Data.SciRatio.Read
import Data.SciRatio.Show
-- | Whether output should be suppressed.
quiet :: Bool
quiet = True
data TestState
= TestState
{ failCount :: Int }
initTestState :: TestState
initTestState
= TestState
{ failCount = 0 }
-- | A state-threading IO monad for the test harness: each action
-- receives the current 'TestState' and returns it (possibly updated).
newtype Test a = Test { runTest :: TestState -> IO (a, TestState) }
instance Functor Test where
  fmap f u = return f `ap` u
instance Applicative Test where
  pure = return
  (<*>) = ap
instance Monad Test where
  return x = Test $ \ s -> return (x, s)
  Test f >>= u = Test $ \ s -> do
    (x, s') <- f s
    runTest (u x) s'
  -- NOTE(review): `fail s` on the right-hand side resolves to this very
  -- method, so pattern-match failure loops forever while repeatedly
  -- logging "aborted".  Likely `error s` was intended -- confirm.
  fail s = failTest "aborted" >> fail s
failTest :: String -> Test ()
-- | Report a failure to stderr and bump the failure counter.
failTest msg = do
  liftIO . hPutStrLn stderr $ "*** FAIL: " ++ msg
  st <- getTestState
  putTestState st { failCount = failCount st + 1 }
passTest :: String -> Test ()
-- | Report success (with an optional detail string); silent when 'quiet'.
passTest detail
  | quiet     = return ()
  | otherwise = liftIO . putStrLn $ "ok" ++ suffix
  where
    suffix | null detail = ""
           | otherwise   = printf " (%s)" detail
expect :: String -> Bool -> Test ()
-- | Record a labelled boolean check as a pass or a failure.
expect label ok = if ok then passTest label else failTest label
liftIO :: IO a -> Test a
liftIO m = Test $ \ s -> do
x <- m
return (x, s)
getTestState :: Test TestState
getTestState = Test $ \ s -> return (s, s)
putTestState :: TestState -> Test ()
putTestState s = Test $ \ _ -> return ((), s)
-- | Set a time limit for a test (in seconds).
timeoutT :: Double -> String -> Test () -> Test ()
timeoutT t n m = Test $ \ s -> do
result <- timeout (round (t * 1e6)) (runTest m s)
case result of
Just (x, s) -> return (x, s)
Nothing -> runTest (failTest ("timed out: " ++ n)) s
testCases :: [(String, SciRational)]
testCases =
[ ("0", -0.0e+3)
, ("25", 0.25e+2)
, ("-.1", -1.0e-1)
, ("2.5e20/3", 5.0e+20 / 6)
, ("-0.0e+3", (0 % 1) .^ 0)
, ("0.25e+2", (25 % 1) .^ 0)
, ("-1.0e-1", ((-1) % 1) .^ (-1))
, ("5.0e+20/6.e0", (25 % 3) .^ 19)
, ("0xfeedface", 4277009102)
, ("0xfeedface", 0xfeedface)
, ("0e0", 0)
, ("-00", 0)
, ("-.0000", 0)
, ("-.01", -0.01)
, (".00008", 0.00008)
, ("1", 1)
, ("+1", 1)
, ("-1", -1)
, ("1.", 1)
, ("-1.", -1)
, ("+1.", 1)
, ("3/5", 3 / 5)
, ("5e20/5", 5e20 / 5)
, ("1/3", 1 / 3)
, ("-2/3", -2 / 3)
, ("-2000/3", -2000 / 3)
, ("-2000/-3", -2000 / (-3))
, ("-2000/-3.e3", -2000 / (-3e3))
, ("37e-25/125", 37e-25 / 125)
, ("-1/2", -1 / 2)
, ("2e1", 2e1)
, ("1e2", 1e2)
, ("1e-3", 1e-3)
, ("-.48e+1/.39", -0.48e1 / 0.39)
, ("-2.14e+1/17", -2.14e1 / 17)
, ("-.3e+1/17", -0.3e1 / 17)
, ("-2.e+1/17", -2e1 / 17)
, ("3.14e-17", 3.14e-17)
, ("0xfFed90", 0xfFed90)
, ("0o013573", 0o013573)
, ("0b1001101", 77)
]
testCasesExtreme :: [(String, SciRational)]
testCasesExtreme =
[ ("1e99999999", (1 % 1) .^ 99999999)
, ("37e-200/125", 37 / 125 .^ (-200))
, ("0.23456e-99999999", 0.23456 .^ (-99999999))
, ("-0.002397/338792e-9999999999", -0.002397 / 338792 .^ 9999999999)
]
readSciRationalT :: String -> (SciRational -> Test ()) -> Test ()
readSciRationalT s f = case P.readP_to_S readSciRationalP s of
[(x, [])] -> f x
r -> failTest $ printf "can't parse: %s (%s)" s (show r)
-- | Tests the comparison operation in both directions.
testOrd :: String -> Ordering -> String -> Test ()
testOrd s ordering s' =
readSciRationalT s $ \ x ->
readSciRationalT s' $ \ x' -> do
expect (printf "%s %s %s" s (show ordering) s') $
compare x x' == ordering
expect (printf "%s %s %s" s' (show ordering') s) $
compare x' x == ordering'
where invert GT = LT
invert EQ = EQ
invert LT = GT
ordering' = invert ordering
testShowRead :: String -> SciRational -> Test ()
testShowRead s x =
let s' = showNumber x in
readSciRationalT s' $ \ x' ->
if x' == x
then passTest $ printf "%s ==> %s" s s'
else failTest $ printf "shown result (%s) not equal to original (%s)"
(show x') (show x)
testReadShowRead :: (String, SciRational) -> Test ()
testReadShowRead (s, y) =
readSciRationalT s $ \ x ->
if x == y
then testShowRead s x
else failTest $ printf "parsed result (%s) not equal to expected (%s): %s"
(show x) (show y) s
test :: Test ()
test = do
-- recip must recanonicalize the number otherwise this test would fail
expect "5 == recip (1 / 5)" $
5 == recip (1 / 5 :: SciRational)
-- comparison tests
testOrd "0" EQ "0"
testOrd "0" LT "1"
testOrd "0" GT "-1"
testOrd "10" LT "11"
testOrd "11" GT "10"
testOrd "-5e2" LT "400"
testOrd "5e2" GT "400"
testOrd "3.5e99999" GT "-2"
testOrd "-3.5e99999" LT "2"
testOrd "-3.5e-99999" LT "2"
testOrd "3.5e99999" EQ "3.5e99999"
testOrd "-3.5e99999" LT "3.5e99999"
testOrd "-3.5e99999" EQ "-3.5e99999"
testOrd "-3.5e99999" LT "-2.5e99999"
testOrd "3.5e99999" GT "2.5e99999"
testOrd "3.5e99999" GT "0"
mapM_ testReadShowRead testCases
timeoutT 5 "testCasesExtreme" $ mapM_ testReadShowRead testCasesExtreme
main :: IO ()
-- | Run the suite; exit non-zero iff any check failed.
main = do
  (_, finalState) <- runTest test initTestState
  if failCount finalState > 0 then exitFailure else exitSuccess
| Rufflewind/sci-ratio | tests/Main.hs | mit | 5,427 | 0 | 17 | 1,309 | 2,162 | 1,169 | 993 | 167 | 3 |
module Main where
import Primes (primeFactors)
import Util (removeDuplicates, iterativelyTake)
n = 5

-- | For a group of (number, distinct-prime-factor-list) pairs, pair the
-- first number with a flag saying whether every member has exactly 'n'
-- factors.
checkGroup g = (fst (head g), everyoneHasN)
  where
    everyoneHasN = all ((== n) . length . snd) g
main :: IO ()
-- ^ Project Euler 47: print the first number in a run of 'n' candidates
-- that each have 'n' distinct prime factors.
main =
  let
    candidates = [2..]
    -- Pair each candidate with its de-duplicated prime factorisation.
    factorized = zip candidates $ map (removeDuplicates.primeFactors) candidates
    -- Groups of n consecutive entries (exact windowing semantics depend
    -- on 'iterativelyTake' -- confirm in Util).
    grouped = iterativelyTake n factorized
    results = map checkGroup grouped
    valid = filter snd results
  in
    print.fst.head $ valid
module Probability.Distribution.Binomial where
import Probability.Random
import MCMC
builtin binomial_density 3 "binomial_density" "Distribution"
builtin builtin_sample_binomial 3 "sample_binomial" "Distribution"
-- | Integer support of Binomial(n, p): the range 0..n.
binomial_bounds n = integer_between 0 n
-- | MCMC moves for a binomial variable @x@: an integer slice sampler
-- over the support plus an increment/decrement Metropolis-Hastings move.
binomial_effect n x = do
    add_move $ slice_sample_integer_random_variable x (binomial_bounds n)
    add_move $ inc_dec_mh x (binomial_bounds n)
-- | Sampling action: threads the RNG state through the builtin sampler.
sample_binomial n p = RandomStructure (binomial_effect n) modifiable_structure $ liftIO (IOAction (\s->(s,builtin_sample_binomial n p s)))
-- | The binomial distribution object.  No quantile function is
-- provided ('no_quantile').
binomial n p = Distribution "binomial" (make_densities $ binomial_density n p) (no_quantile "binomial") (sample_binomial n p) (binomial_bounds n)
| bredelings/BAli-Phy | haskell/Probability/Distribution/Binomial.hs | gpl-2.0 | 687 | 0 | 12 | 83 | 206 | 101 | 105 | -1 | -1 |
module Main ( main ) where
import Graphics.Rendering.OpenGL
import Graphics.UI.GLUT
import System.Environment ( getArgs )
import GL.Bindings ( display, idle, reshape )
import Program.EventHandle ( timerLoop, keyboardChar, mouse, mouseMotion
, passiveMotion, keyboardCharUp )
import qualified Program.State as P
-- |Main program that sets up the glut window and
-- some basic callbacks. Also creates the program
-- state to be shared between all the callbacks.
main :: IO ()
main = do
-- Initial state
arguments <- getArgs
state <- P.initializeState arguments
-- Set up GLUT window
(_,_) <- getArgsAndInitialize
initialDisplayMode $= [DoubleBuffered]
initialWindowSize $= Size 640 640
_ <- createWindow "Genetic Algorithm"
windowPosition $= Position 0 480
blend $= Enabled
blendEquation $= FuncAdd
blendFunc $= (SrcAlpha, OneMinusSrcAlpha)
-- Set up callbacks
displayCallback $= display state
idleCallback $= Just (idle state)
reshapeCallback $= Just (reshape state)
keyboardCallback $= Just (keyboardChar state)
keyboardUpCallback $= Just (keyboardCharUp state)
mouseCallback $= Just (mouse state)
motionCallback $= Just (mouseMotion state)
passiveMotionCallback $= Just (passiveMotion state)
-- Set up main control loop
addTimerCallback (P.msPerFrame state) (timerLoop state)
-- Start the program
mainLoop
| szbokhar/genetic-boxes | GuiInteractive.hs | gpl-2.0 | 1,519 | 0 | 10 | 389 | 353 | 179 | 174 | 30 | 1 |
{-
Copyright (C) 2006-2010 John MacFarlane <jgm@berkeley.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-}
{- |
Module : Text.Pandoc.Readers.LaTeX
Copyright : Copyright (C) 2006-2010 John MacFarlane
License : GNU GPL, version 2 or above
Maintainer : John MacFarlane <jgm@berkeley.edu>
Stability : alpha
Portability : portable
Conversion of LaTeX to 'Pandoc' document.
-}
module Text.Pandoc.Readers.LaTeX (
readLaTeX,
rawLaTeXInline,
rawLaTeXEnvironment'
) where
import Text.ParserCombinators.Parsec
import Text.Pandoc.Definition
import Text.Pandoc.Shared
import Text.Pandoc.Parsing
import Data.Maybe ( fromMaybe )
import Data.Char ( chr, toUpper )
import Data.List ( intercalate, isPrefixOf, isSuffixOf )
import Control.Monad
-- | Parse LaTeX from string and return 'Pandoc' document.
readLaTeX :: ParserState -- ^ Parser state, including options for parser
-> String -- ^ String to parse (assumes @'\n'@ line endings)
-> Pandoc
readLaTeX = readWith parseLaTeX
-- characters with special meaning
specialChars :: [Char]
specialChars = "\\`$%^&_~#{}[]\n \t|<>'\"-"
--
-- utility functions
--
-- | Returns text between brackets and its matching pair.
-- | Returns text between brackets and its matching pair, with the
-- delimiters themselves included in the result.
bracketedText :: Char -> Char -> GenParser Char st [Char]
bracketedText openB closeB = do
  inner <- charsInBalanced openB closeB anyChar
  return (openB : inner ++ [closeB])
-- | Returns an option or argument of a LaTeX command.
optOrArg :: GenParser Char st [Char]
optOrArg = try $ spaces >> (bracketedText '{' '}' <|> bracketedText '[' ']')
-- | True if the string begins with '{'.
-- | True if the string begins with '{' (i.e. it is a braced argument
-- rather than a bracketed option).
isArg :: [Char] -> Bool
isArg s = case s of
  '{' : _ -> True
  _       -> False
-- | Returns list of options and arguments of a LaTeX command.
commandArgs :: GenParser Char st [[Char]]
commandArgs = many optOrArg
-- | Parses LaTeX command, returns (name, star, list of options or arguments).
-- | Parses a LaTeX command, returning its name, an optional star
-- (commands such as @\section*@ have starred variants), and its
-- options/arguments.
command :: GenParser Char st ([Char], [Char], [[Char]])
command =
  char '\\' >>
    liftM3 (,,) (many1 letter) (option "" (string "*")) commandArgs
-- | Match @\begin{name}@ (plus any arguments and trailing whitespace),
-- returning the environment name.
begin :: [Char] -> GenParser Char st [Char]
begin name = try $ do
  _ <- string "\\begin"
  spaces
  _ <- char '{' >> string name >> char '}'
  optional commandArgs
  spaces
  return name
-- | Match @\end{name}@, returning the environment name.
end :: [Char] -> GenParser Char st [Char]
end name = try $ do
  _ <- string "\\end"
  spaces
  _ <- char '{' >> string name >> char '}'
  return name
-- | Returns a list of block elements containing the contents of an
-- environment.
environment :: [Char] -> GenParser Char ParserState [Block]
environment name = try $ begin name >> spaces >> manyTill block (end name) >>~ spaces
anyEnvironment :: GenParser Char ParserState Block
anyEnvironment = try $ do
string "\\begin"
spaces
char '{'
name <- many letter
star <- option "" (string "*") -- some environments have starred variants
char '}'
optional commandArgs
spaces
contents <- manyTill block (end (name ++ star))
spaces
return $ BlockQuote contents
--
-- parsing documents
--
-- | Process LaTeX preamble, extracting metadata.
processLaTeXPreamble :: GenParser Char ParserState ()
processLaTeXPreamble = do
try $ string "\\documentclass"
skipMany $ bibliographic <|> macro <|> commentBlock <|> skipChar
-- | Parse LaTeX and return 'Pandoc'.
parseLaTeX :: GenParser Char ParserState Pandoc
parseLaTeX = do
spaces
skipMany $ comment >> spaces
blocks <- try (processLaTeXPreamble >> environment "document")
<|> (many block >>~ (spaces >> eof))
state <- getState
let blocks' = filter (/= Null) blocks
let title' = stateTitle state
let authors' = stateAuthors state
let date' = stateDate state
return $ Pandoc (Meta title' authors' date') blocks'
--
-- parsing blocks
--
parseBlocks :: GenParser Char ParserState [Block]
parseBlocks = spaces >> many block
block :: GenParser Char ParserState Block
block = choice [ hrule
, codeBlock
, header
, list
, blockQuote
, simpleTable
, commentBlock
, macro
, bibliographic
, para
, itemBlock
, unknownEnvironment
, ignore
, unknownCommand
] <?> "block"
--
-- header blocks
--
header :: GenParser Char ParserState Block
header = section <|> chapter
chapter :: GenParser Char ParserState Block
chapter = try $ do
string "\\chapter"
result <- headerWithLevel 1
updateState $ \s -> s{ stateHasChapters = True }
return result
section :: GenParser Char ParserState Block
-- ^ Parse a (sub...)section or (sub...)paragraph command into a header.
-- Each "sub" prefix adds one level; "section" starts at level 1 and
-- "paragraph" at level 4.  Everything shifts down one level if the
-- document contains chapters (see 'chapter').
section = try $ do
  char '\\'
  subs <- many (try (string "sub"))
  base <- try (string "section" >> return 1) <|> (string "paragraph" >> return 4)
  st <- getState
  let lev = if stateHasChapters st
               then length subs + base + 1
               else length subs + base
  headerWithLevel lev
headerWithLevel :: Int -> GenParser Char ParserState Block
headerWithLevel lev = try $ do
spaces
optional (char '*')
spaces
optional $ bracketedText '[' ']' -- alt title
spaces
char '{'
title' <- manyTill inline (char '}')
spaces
return $ Header lev (normalizeSpaces title')
--
-- hrule block
--
hrule :: GenParser Char st Block
hrule = oneOfStrings [ "\\begin{center}\\rule{3in}{0.4pt}\\end{center}\n\n",
"\\newpage" ] >> spaces >> return HorizontalRule
-- tables
simpleTable :: GenParser Char ParserState Block
simpleTable = try $ do
string "\\begin"
spaces
string "{tabular}"
spaces
aligns <- parseAligns
let cols = length aligns
optional hline
header' <- option [] $ parseTableHeader cols
rows <- many (parseTableRow cols >>~ optional hline)
spaces
end "tabular"
spaces
let header'' = if null header'
then replicate cols []
else header'
return $ Table [] aligns (replicate cols 0) header'' rows
-- | Skip an @\hline@ rule (with any leading whitespace).
hline :: GenParser Char st ()
hline = try $ do
  spaces
  _ <- string "\\hline"
  return ()
-- | Parse a tabular alignment spec such as @{|l|c|r|}@ into a list of
-- column alignments.  Only @l@, @c@ and @r@ columns are recognised;
-- vertical-bar separators are skipped.
parseAligns :: GenParser Char ParserState [Alignment]
parseAligns = try $ do
  char '{'
  optional $ char '|'
  let alignChar =     (char 'c' >> return AlignCenter)
                  <|> (char 'l' >> return AlignLeft)
                  <|> (char 'r' >> return AlignRight)
  cols <- sepEndBy alignChar (optional $ char '|')
  char '}'
  spaces
  return cols
parseTableHeader :: Int -- ^ number of columns
                 -> GenParser Char ParserState [TableCell]
-- | A header row is an ordinary table row followed by an @\hline@.
parseTableHeader cols = try $ do
  headerCells <- parseTableRow cols
  hline
  return headerCells
parseTableRow :: Int -- ^ number of columns
-> GenParser Char ParserState [TableCell]
parseTableRow cols = try $ do
let tableCellInline = notFollowedBy (char '&' <|>
(try $ char '\\' >> char '\\')) >> inline
cells' <- sepBy (spaces >> liftM ((:[]) . Plain . normalizeSpaces)
(many tableCellInline)) (char '&')
guard $ length cells' == cols
spaces
(try $ string "\\\\" >> spaces) <|>
(lookAhead (end "tabular") >> return ())
return cells'
--
-- code blocks
--
-- | Parse any supported verbatim environment into a 'CodeBlock'.
codeBlock :: GenParser Char ParserState Block
codeBlock = codeBlockWith "verbatim" <|> codeBlockWith "Verbatim" <|> codeBlockWith "lstlisting" <|> lhsCodeBlock
-- Note: Verbatim is from fancyvrb.
-- | Parse \begin{env}...\end{env} literally (no inline parsing inside),
-- stripping trailing newlines.  The "code" environment gets the
-- haskell class, for literate-Haskell input.
codeBlockWith :: String -> GenParser Char st Block
codeBlockWith env = try $ do
  string "\\begin"
  spaces                      -- don't use begin function because it
  string $ "{" ++ env ++ "}"  -- gobbles whitespace; we want to gobble
  optional blanklines         -- blank lines, but not leading space
  contents <- manyTill anyChar (try (string $ "\\end{" ++ env ++ "}"))
  spaces
  let classes = if env == "code" then ["haskell"] else []
  return $ CodeBlock ("",classes,[]) (stripTrailingNewlines contents)
-- | A literate-Haskell "code" environment, re-tagged with the classes
-- the highlighting writers expect.  Only active in LHS mode.
lhsCodeBlock :: GenParser Char ParserState Block
lhsCodeBlock = do
  failUnlessLHS
  (CodeBlock (_,_,_) cont) <- codeBlockWith "code"
  return $ CodeBlock ("", ["sourceCode","literate","haskell"], []) cont
--
-- block quotes
--
-- | Parse a quote or quotation environment as a 'BlockQuote'.
blockQuote :: GenParser Char ParserState Block
blockQuote = (environment "quote" <|> environment "quotation") >>~ spaces >>=
             return . BlockQuote
--
-- list blocks
--
-- | Any of the three list environments (itemize, enumerate, description).
list :: GenParser Char ParserState Block
list = bulletList <|> orderedList <|> definitionList <?> "list"
-- | Parse an \item and the blocks following it.  The parser context is
-- temporarily set to 'ListItemState' so block parsing stops at the next
-- item, then restored.  A single bracketed argument (used as the term
-- in description lists) is re-parsed as inlines with its brackets
-- stripped.
listItem :: GenParser Char ParserState ([Inline], [Block])
listItem = try $ do
  ("item", _, args) <- command
  spaces
  state <- getState
  let oldParserContext = stateParserContext state
  updateState (\s -> s {stateParserContext = ListItemState})
  blocks <- many block
  updateState (\s -> s {stateParserContext = oldParserContext})
  opt <- case args of
           ([x]) | "[" `isPrefixOf` x && "]" `isSuffixOf` x ->
                     parseFromString (many inline) $ tail $ init x
           _ -> return []
  return (opt, blocks)
-- | Parse an {enumerate} environment into an 'OrderedList'.  Unless in
-- strict mode, an optional [marker] sets list style and delimiter, and
-- a \setcounter{enum..}{n} sets the start number to n+1.  A
-- \setlength{\itemindent}{...} is consumed and discarded.
orderedList :: GenParser Char ParserState Block
orderedList = try $ do
  string "\\begin"
  spaces
  string "{enumerate}"
  spaces
  (_, style, delim) <- option (1, DefaultStyle, DefaultDelim) $
                       try $ do failIfStrict
                                char '['
                                res <- anyOrderedListMarker
                                char ']'
                                return res
  spaces
  option "" $ try $ do string "\\setlength{\\itemindent}"
                       char '{'
                       manyTill anyChar (char '}')
  spaces
  start <- option 1 $ try $ do failIfStrict
                               string "\\setcounter{enum"
                               many1 (oneOf "iv")
                               string "}{"
                               -- 'read' is safe: num is many1 digit
                               num <- many1 digit
                               char '}'
                               spaces
                               return $ (read num) + 1
  items <- many listItem
  end "enumerate"
  spaces
  return $ OrderedList (start, style, delim) $ map snd items
-- | Parse an {itemize} environment into a 'BulletList'; the items'
-- optional arguments are discarded.
bulletList :: GenParser Char ParserState Block
bulletList = try $ do
  begin "itemize"
  listItems <- many listItem
  end "itemize"
  spaces
  return $ BulletList (map snd listItems)

-- | Parse a {description} environment into a 'DefinitionList'.  Each
-- item's optional argument becomes the term; its blocks, the definition.
definitionList :: GenParser Char ParserState Block
definitionList = try $ do
  begin "description"
  listItems <- many listItem
  end "description"
  spaces
  return $ DefinitionList [ (term, [def]) | (term, def) <- listItems ]
--
-- paragraph block
--
-- | Parse a paragraph of inlines.  A paragraph that consists entirely
-- of empty strings and spaces yields 'Null' rather than an empty
-- 'Para'.  (Replaces the roundabout @null (filter (`notElem` ...))@
-- test with the equivalent, idiomatic 'all'.)
para :: GenParser Char ParserState Block
para = do
  res <- many1 inline
  spaces
  return $ if all (`elem` [Str "", Space]) res
              then Null
              else Para $ normalizeSpaces res
--
-- title authors date
--
-- | Any of the title/author/date metadata commands; each stores its
-- result in parser state and emits no visible block.
bibliographic :: GenParser Char ParserState Block
bibliographic = choice [ maketitle, title, subtitle, authors, date ]

-- | \maketitle produces no output.
maketitle :: GenParser Char st Block
maketitle = try (string "\\maketitle") >> spaces >> return Null

-- | \title{...}: inlines are stored as the document title.
title :: GenParser Char ParserState Block
title = try $ do
  string "\\title{"
  tit <- manyTill inline (char '}')
  spaces
  updateState (\state -> state { stateTitle = tit })
  return Null

-- | \subtitle{...}: appended to the stored title after ":" and a
-- line break.
subtitle :: GenParser Char ParserState Block
subtitle = try $ do
  string "\\subtitle{"
  tit <- manyTill inline (char '}')
  spaces
  updateState (\state -> state { stateTitle = stateTitle state ++
                                 Str ":" : LineBreak : tit })
  return Null

-- | \author{...}: authors are separated by \and (consumed as a '&'
-- separator); each author's inlines are normalized and stored.
authors :: GenParser Char ParserState Block
authors = try $ do
  string "\\author{"
  let andsep = try $ string "\\and" >> notFollowedBy letter >>
               spaces >> return '&'
  raw <- sepBy (many $ notFollowedBy (char '}' <|> andsep) >> inline) andsep
  let authors' = map normalizeSpaces raw
  char '}'
  spaces
  updateState (\s -> s { stateAuthors = authors' })
  return Null

-- | \date{...}: inlines are normalized and stored as the document date.
date :: GenParser Char ParserState Block
date = try $ do
  string "\\date{"
  date' <- manyTill inline (char '}')
  spaces
  updateState (\state -> state { stateDate = normalizeSpaces date' })
  return Null
--
-- item block
-- for use in unknown environments that aren't being parsed as raw latex
--
-- this forces items to be parsed in different blocks
-- | Handle an \item outside a recognized list environment.  Inside a
-- list item context it fails so the list parsers take over; otherwise
-- the first argument (brackets stripped) becomes a 'Plain' block.
-- 'head args' is safe: guarded by the null check just above.
itemBlock :: GenParser Char ParserState Block
itemBlock = try $ do
  ("item", _, args) <- command
  state <- getState
  if stateParserContext state == ListItemState
     then fail "item should be handled by list block"
     else if null args
             then return Null
             else return $ Plain [Str (stripFirstAndLast (head args))]
--
-- raw LaTeX
--
-- | Parse any LaTeX environment and return a Para block containing
-- the whole literal environment as raw TeX.
rawLaTeXEnvironment :: GenParser Char st Block
rawLaTeXEnvironment = do
  contents <- rawLaTeXEnvironment'
  spaces
  return $ RawBlock "latex" contents
-- | Parse any LaTeX environment and return a string containing
-- the whole literal environment as raw TeX.
-- Nested environments are handled by the recursive call in the
-- 'manyTill' body, so inner \begin/\end pairs don't end the outer one.
rawLaTeXEnvironment' :: GenParser Char st String
rawLaTeXEnvironment' = try $ do
  string "\\begin"
  spaces
  char '{'
  name <- many1 letter
  star <- option "" (string "*") -- for starred variants
  let name' = name ++ star
  char '}'
  args <- option [] commandArgs
  let argStr = concat args
  contents <- manyTill (choice [ (many1 (noneOf "\\")),
                                 rawLaTeXEnvironment',
                                 string "\\" ])
                       (end name')
  return $ "\\begin{" ++ name' ++ "}" ++ argStr ++
           concat contents ++ "\\end{" ++ name' ++ "}"
-- | Parse an unrecognized environment: kept whole as raw TeX when raw
-- parsing is enabled, otherwise only its contents are parsed.
-- (The previous @result <- ...; return result@ round trip was
-- redundant; the chosen parser's result is returned directly.)
unknownEnvironment :: GenParser Char ParserState Block
unknownEnvironment = try $ do
  state <- getState
  if stateParseRaw state      -- check whether we should include raw TeX
     then rawLaTeXEnvironment -- if so, get whole raw environment
     else anyEnvironment      -- otherwise just the contents
-- \ignore{} is used conventionally in literate haskell for definitions
-- that are to be processed by the compiler but not printed.
-- | Parse \ignore{...} (a literate-Haskell convention) and discard it.
ignore :: GenParser Char ParserState Block
ignore = try $ do
  ("ignore", _, _) <- command
  spaces
  return Null

-- | Try to expand a parsed command with the known macros.  If the
-- expansion is unchanged, return the command as a raw latex inline;
-- otherwise push the expansion back onto the input for re-parsing and
-- return an empty string.
demacro :: (String, String, [String]) -> GenParser Char ParserState Inline
demacro (n,st,args) = try $ do
  let raw = "\\" ++ n ++ st ++ concat args
  s' <- applyMacros' raw
  if raw == s'
     then return $ RawInline "latex" raw
     else do
       inp <- getInput
       setInput $ s' ++ inp
       return $ Str ""
-- | Handle a command with no dedicated parser.  With raw parsing
-- enabled, keep the (macro-expanded) command as a raw inline; otherwise
-- drop the command but push its arguments back onto the input so their
-- contents are still parsed, unless the command is ignorable.
unknownCommand :: GenParser Char ParserState Block
unknownCommand = try $ do
  spaces
  notFollowedBy' $ oneOfStrings ["\\begin","\\end","\\item"] >>
                   notFollowedBy letter
  state <- getState
  when (stateParserContext state == ListItemState) $
     notFollowedBy' (string "\\item")
  if stateParseRaw state
     then command >>= demacro >>= return . Plain . (:[])
     else do
       (name, _, args) <- command
       spaces
       unless (name `elem` commandsToIgnore) $ do
         -- put arguments back in input to be parsed
         inp <- getInput
         setInput $ intercalate " " args ++ inp
       return Null

-- | Commands whose arguments should not be re-injected into the input.
commandsToIgnore :: [String]
commandsToIgnore = ["special","pdfannot","pdfstringdef", "index","bibliography"]
-- | Consume a single character (any non-backslash, or a backslash not
-- starting \begin{document}) plus trailing whitespace, emitting no
-- output.  Used to skip over unparseable characters.
skipChar :: GenParser Char ParserState Block
skipChar = do
  satisfy (/='\\') <|>
    (notFollowedBy' (try $
                     string "\\begin" >> spaces >> string "{document}") >>
     anyChar)
  spaces
  return Null

-- | One or more comments (with surrounding space) make an empty block.
commentBlock :: GenParser Char st Block
commentBlock = many1 (comment >> spaces) >> return Null
--
-- inline
--
-- | Any LaTeX inline element.  Order matters: e.g. 'emDash' is tried
-- before 'enDash' and 'hyphen' so "---" is not consumed as "--" or "-",
-- and the raw-command fallback comes near the end.
inline :: GenParser Char ParserState Inline
inline = choice [ str
                , endline
                , whitespace
                , quoted
                , apostrophe
                , strong
                , math
                , ellipses
                , emDash
                , enDash
                , hyphen
                , emph
                , strikeout
                , superscript
                , subscript
                , code
                , url
                , link
                , image
                , footnote
                , linebreak
                , accentedChar
                , nonbreakingSpace
                , cite
                , specialChar
                , ensureMath
                , rawLaTeXInline'
                , escapedChar
                , emptyGroup
                , unescapedChar
                , comment
                ] <?> "inline"
-- latex comment
-- | A % comment runs to the end of the line and yields no text.
comment :: GenParser Char st Inline
comment = try $ char '%' >> manyTill anyChar newline >> spaces >> return (Str "")

-- | An accented character: either the accent-command form or one of
-- the special letter commands.
accentedChar :: GenParser Char st Inline
accentedChar = normalAccentedChar <|> specialAccentedChar

-- | Parse \'x, \`x, \^x, \"x or \~x (letter optionally braced) and
-- resolve it via 'accentTable'; unknown combinations become '?'.
normalAccentedChar :: GenParser Char st Inline
normalAccentedChar = try $ do
  char '\\'
  accent <- oneOf "'`^\"~"
  character <- (try $ char '{' >> letter >>~ char '}') <|> letter
  let table = fromMaybe [] $ lookup character accentTable
  let result = case lookup accent table of
                 Just num -> chr num
                 Nothing -> '?'
  return $ Str [result]
-- an association list of letters and association list of accents
-- and decimal character numbers.
-- | Decimal Latin-1 code points for accented letters, keyed first by
-- the base letter, then by the accent command character.
accentTable :: [(Char, [(Char, Int)])]
accentTable =
  [ ('A', [('`', 192), ('\'', 193), ('^', 194), ('~', 195), ('"', 196)]),
    ('E', [('`', 200), ('\'', 201), ('^', 202), ('"', 203)]),
    ('I', [('`', 204), ('\'', 205), ('^', 206), ('"', 207)]),
    ('N', [('~', 209)]),
    ('O', [('`', 210), ('\'', 211), ('^', 212), ('~', 213), ('"', 214)]),
    ('U', [('`', 217), ('\'', 218), ('^', 219), ('"', 220)]),
    -- Fixed: 'â' is U+00E2 (226) and 'ã' is U+00E3 (227); the old row
    -- mapped '^' to 227 and had no '~' entry at all.
    ('a', [('`', 224), ('\'', 225), ('^', 226), ('~', 227), ('"', 228)]),
    ('e', [('`', 232), ('\'', 233), ('^', 234), ('"', 235)]),
    ('i', [('`', 236), ('\'', 237), ('^', 238), ('"', 239)]),
    ('n', [('~', 241)]),
    ('o', [('`', 242), ('\'', 243), ('^', 244), ('~', 245), ('"', 246)]),
    ('u', [('`', 249), ('\'', 250), ('^', 251), ('"', 252)]) ]
-- | One of the dedicated special-letter commands.
specialAccentedChar :: GenParser Char st Inline
specialAccentedChar = choice [ ccedil, aring, iuml, szlig, aelig, lslash,
                               oslash, pound, euro, copyright, sect ]

-- | \cc / \cC: c-cedilla (231) or C-cedilla (199).
ccedil :: GenParser Char st Inline
ccedil = try $ do
  char '\\'
  letter' <- oneOfStrings ["cc", "cC"]
  notFollowedBy letter
  let num = if letter' == "cc" then 231 else 199
  return $ Str [chr num]

-- | \aa / \AA: a-ring (229) or A-ring (197).
aring :: GenParser Char st Inline
aring = try $ do
  char '\\'
  letter' <- oneOfStrings ["aa", "AA"]
  notFollowedBy letter
  let num = if letter' == "aa" then 229 else 197
  return $ Str [chr num]

-- | \"\i or \"{\i}: i-umlaut (239).
iuml :: GenParser Char st Inline
iuml = try (string "\\\"") >> oneOfStrings ["\\i", "{\\i}"] >>
       return (Str [chr 239])

-- | \ss: sharp s (223).
szlig :: GenParser Char st Inline
szlig = try (string "\\ss") >> notFollowedBy letter >> return (Str [chr 223])
-- | \o / \O: slashed o (248) or slashed O (216).
oslash :: GenParser Char st Inline
oslash = try $ do
  char '\\'
  letter' <- choice [char 'o', char 'O']
  notFollowedBy letter
  let num = if letter' == 'o' then 248 else 216
  return $ Str [chr num]

-- | {\l} / {\L} (or the space-terminated forms): stroked l or L.
lslash :: GenParser Char st Inline
lslash = try $ do
  cmd <- oneOfStrings ["{\\L}","{\\l}"]
         <|> (oneOfStrings ["\\L ","\\l "] >>~ notFollowedBy letter)
  return $ if 'l' `elem` cmd
              then Str "\x142"
              else Str "\x141"

-- | \ae / \AE: ae ligature (230) or AE ligature (198).
aelig :: GenParser Char st Inline
aelig = try $ do
  char '\\'
  letter' <- oneOfStrings ["ae", "AE"]
  notFollowedBy letter
  let num = if letter' == "ae" then 230 else 198
  return $ Str [chr num]

-- | \pounds: pound sign (163).
pound :: GenParser Char st Inline
pound = try (string "\\pounds" >> notFollowedBy letter) >> return (Str [chr 163])

-- | \euro: euro sign (8364).
euro :: GenParser Char st Inline
euro = try (string "\\euro" >> notFollowedBy letter) >> return (Str [chr 8364])

-- | \copyright: copyright sign (169).
copyright :: GenParser Char st Inline
copyright = try (string "\\copyright" >> notFollowedBy letter) >> return (Str [chr 169])

-- | \S: section sign (167).
sect :: GenParser Char st Inline
sect = try (string "\\S" >> notFollowedBy letter) >> return (Str [chr 167])
-- | A backslash-escaped special character; an escaped newline becomes
-- a plain space.
escapedChar :: GenParser Char st Inline
escapedChar = do
  result <- escaped (oneOf specialChars)
  return $ if result == '\n' then Str " " else Str [result]

-- | An empty group {} produces no text.
emptyGroup :: GenParser Char st Inline
emptyGroup = try $ do
  char '{'
  spaces
  char '}'
  return $ Str ""
-- nonescaped special characters
-- | A special character used without escaping is passed through as-is.
unescapedChar :: GenParser Char st Inline
unescapedChar = oneOf "`$^&_#{}[]|<>" >>= return . (\c -> Str [c])
-- | One of the special-symbol commands below.
specialChar :: GenParser Char st Inline
specialChar = choice [ spacer, interwordSpace, sentenceEnd,
                       backslash, tilde, caret,
                       bar, lt, gt, doubleQuote ]

-- | \, (thin space) is dropped.
spacer :: GenParser Char st Inline
spacer = try (string "\\,") >> return (Str "")

-- | \@ (sentence-end marker) is dropped.
sentenceEnd :: GenParser Char st Inline
sentenceEnd = try (string "\\@") >> return (Str "")

-- | "\ " (explicit interword space) becomes a non-breaking space.
interwordSpace :: GenParser Char st Inline
interwordSpace = try (string "\\ ") >> return (Str "\160")

-- | \textbackslash (with optional empty group) becomes a backslash.
backslash :: GenParser Char st Inline
backslash = try (string "\\textbackslash") >> optional (try $ string "{}") >> return (Str "\\")

-- | \ensuremath{\sim} becomes a tilde.
tilde :: GenParser Char st Inline
tilde = try (string "\\ensuremath{\\sim}") >> return (Str "~")

-- | \^{} becomes a caret.
caret :: GenParser Char st Inline
caret = try (string "\\^{}") >> return (Str "^")
-- | \textbar (with optional empty group) is the vertical-bar text
-- symbol, so it must produce "|".  The old code returned "\\",
-- evidently copy-pasted from the 'backslash' parser, and inconsistent
-- with 'lt'/'gt' which correctly return "<" and ">".
bar :: GenParser Char st Inline
bar = try (string "\\textbar") >> optional (try $ string "{}") >> return (Str "|")
-- | \textless (with optional empty group) becomes "<".
lt :: GenParser Char st Inline
lt = try (string "\\textless") >> optional (try $ string "{}") >> return (Str "<")

-- | \textgreater (with optional empty group) becomes ">".
gt :: GenParser Char st Inline
gt = try (string "\\textgreater") >> optional (try $ string "{}") >> return (Str ">")

-- | A literal double quote is passed through.
doubleQuote :: GenParser Char st Inline
doubleQuote = char '"' >> return (Str "\"")
-- | Inline code in any supported form.
code :: GenParser Char ParserState Inline
code = code1 <|> code2 <|> code3 <|> lhsInlineCode

-- | \verb with an arbitrary delimiter character.
code1 :: GenParser Char st Inline
code1 = try $ do
  string "\\verb"
  marker <- anyChar
  result <- manyTill anyChar (char marker)
  return $ Code nullAttr $ removeLeadingTrailingSpace result

-- | \texttt{...}; restricted to characters with no special meaning.
code2 :: GenParser Char st Inline
code2 = try $ do
  string "\\texttt{"
  result <- manyTill (noneOf "\\\n~$%^&{}") (char '}')
  return $ Code nullAttr result

-- | \lstinline with an arbitrary delimiter character.
code3 :: GenParser Char st Inline
code3 = try $ do
  string "\\lstinline"
  marker <- anyChar
  result <- manyTill anyChar (char marker)
  return $ Code nullAttr $ removeLeadingTrailingSpace result

-- | Literate-Haskell |inline code|; only active in LHS mode.
lhsInlineCode :: GenParser Char ParserState Inline
lhsInlineCode = try $ do
  failUnlessLHS
  char '|'
  result <- manyTill (noneOf "|\n") (char '|')
  return $ Code ("",["haskell"],[]) result
-- | \emph{...} or \textit{...} becomes 'Emph'.
emph :: GenParser Char ParserState Inline
emph = try $ oneOfStrings [ "\\emph{", "\\textit{" ] >>
       manyTill inline (char '}') >>= return . Emph

-- | \sout{...} becomes 'Strikeout'.
strikeout :: GenParser Char ParserState Inline
strikeout = try $ string "\\sout{" >> manyTill inline (char '}') >>=
            return . Strikeout

-- | \textsuperscript{...} becomes 'Superscript'.
superscript :: GenParser Char ParserState Inline
superscript = try $ string "\\textsuperscript{" >>
              manyTill inline (char '}') >>= return . Superscript

-- note: \textsubscript isn't a standard latex command, but we use
-- a defined version in pandoc.
subscript :: GenParser Char ParserState Inline
subscript = try $ string "\\textsubscript{" >> manyTill inline (char '}') >>=
            return . Subscript

-- | A right single quotation mark.
apostrophe :: GenParser Char ParserState Inline
apostrophe = char '\'' >> return (Str "\x2019")

-- | A smart-quoted span; double quotes are tried first.
quoted :: GenParser Char ParserState Inline
quoted = doubleQuoted <|> singleQuoted

singleQuoted :: GenParser Char ParserState Inline
singleQuoted = enclosed singleQuoteStart singleQuoteEnd inline >>=
               return . Quoted SingleQuote . normalizeSpaces

doubleQuoted :: GenParser Char ParserState Inline
doubleQuoted = enclosed doubleQuoteStart doubleQuoteEnd inline >>=
               return . Quoted DoubleQuote . normalizeSpaces

singleQuoteStart :: GenParser Char st Char
singleQuoteStart = char '`'

-- | Closing single quote must not be followed by a letter or digit
-- (which would indicate an apostrophe inside a word).
singleQuoteEnd :: GenParser Char st ()
singleQuoteEnd = try $ char '\'' >> notFollowedBy alphaNum

doubleQuoteStart :: CharParser st String
doubleQuoteStart = string "``"

doubleQuoteEnd :: CharParser st String
doubleQuoteEnd = try $ string "''"
-- | \dots or \ldots (with optional empty group) becomes an ellipsis.
ellipses :: GenParser Char st Inline
ellipses = try $ do
  char '\\'
  optional $ char 'l'
  string "dots"
  optional $ try $ string "{}"
  return (Str "…")

-- NOTE(review): "--" is mapped to a plain hyphen while "---" becomes a
-- real em dash; confirm whether an en dash (U+2013) was intended here.
enDash :: GenParser Char st Inline
enDash = try (string "--") >> return (Str "-")

emDash :: GenParser Char st Inline
emDash = try (string "---") >> return (Str "—")

hyphen :: GenParser Char st Inline
hyphen = char '-' >> return (Str "-")
-- | \textbf{...} becomes 'Strong'.
strong :: GenParser Char ParserState Inline
strong = try (string "\\textbf{") >> manyTill inline (char '}') >>=
         return . Strong

-- | A run of spaces/tabs collapses to a single 'Space'.
whitespace :: GenParser Char st Inline
whitespace = many1 (oneOf " \t") >> return Space

-- | A tilde is a non-breaking space.
nonbreakingSpace :: GenParser Char st Inline
nonbreakingSpace = char '~' >> return (Str "\160")
-- hard line break
linebreak :: GenParser Char st Inline
linebreak = try $ do
  string "\\\\"
  optional $ bracketedText '[' ']' -- e.g. \\[10pt]
  spaces
  return LineBreak

-- | A run of ordinary (non-special) characters.
str :: GenParser Char st Inline
str = many1 (noneOf specialChars) >>= return . Str

-- endline internal to paragraph
endline :: GenParser Char st Inline
endline = try $ newline >> notFollowedBy blankline >> return Space
-- math
-- | Any math form, macro-expanded; $$/display environments come first
-- so a single '$' parser can't misfire on "$$".
math :: GenParser Char ParserState Inline
math = (math3 >>= applyMacros' >>= return . Math DisplayMath)
       <|> (math1 >>= applyMacros' >>= return . Math InlineMath)
       <|> (math2 >>= applyMacros' >>= return . Math InlineMath)
       <|> (math4 >>= applyMacros' >>= return . Math DisplayMath)
       <|> (math5 >>= applyMacros' >>= return . Math DisplayMath)
       <|> (math6 >>= applyMacros' >>= return . Math DisplayMath)
       <?> "math"

-- | $...$
math1 :: GenParser Char st String
math1 = try $ char '$' >> manyTill anyChar (char '$')

-- | \(...\)
math2 :: GenParser Char st String
math2 = try $ string "\\(" >> manyTill anyChar (try $ string "\\)")

-- | $$...$$ (the inner pair is handled by 'math1').
math3 :: GenParser Char st String
math3 = try $ char '$' >> math1 >>~ char '$'

-- | Display math environments.
math4 :: GenParser Char st String
math4 = try $ do
  name <- begin "displaymath" <|> begin "equation" <|> begin "equation*" <|>
          begin "gather" <|> begin "gather*" <|> begin "gathered" <|>
          begin "multline" <|> begin "multline*"
  manyTill anyChar (end name)

-- | \[...\]
math5 :: GenParser Char st String
math5 = try $ (string "\\[") >> spaces >> manyTill anyChar (try $ string "\\]")

-- | Aligned environments; the '&' alignment markers are stripped.
math6 :: GenParser Char st String
math6 = try $ do
  name <- begin "eqnarray" <|> begin "eqnarray*" <|> begin "align" <|>
          begin "align*" <|> begin "alignat" <|> begin "alignat*" <|>
          begin "split" <|> begin "aligned" <|> begin "alignedat"
  res <- manyTill anyChar (end name)
  return $ filter (/= '&') res -- remove alignment codes
-- | \ensuremath{...}: the first argument's outer characters are
-- stripped and the rest treated as inline math.  The guard ensures
-- 'args' is non-empty; the tail/init stripping assumes the argument
-- string retains its surrounding braces (consistent with how
-- stripFirstAndLast is used on command args elsewhere in this file).
ensureMath :: GenParser Char st Inline
ensureMath = try $ do
  (n, _, args) <- command
  guard $ n == "ensuremath" && not (null args)
  return $ Math InlineMath $ tail $ init $ head args
--
-- links and images
--
-- | \url{...}: the address is shown as code and used as the target.
url :: GenParser Char ParserState Inline
url = try $ do
  string "\\url"
  url' <- charsInBalanced '{' '}' anyChar
  return $ Link [Code ("",["url"],[]) url'] (escapeURI url', "")

-- | \href{target}{label}.
link :: GenParser Char ParserState Inline
link = try $ do
  string "\\href{"
  url' <- manyTill anyChar (char '}')
  char '{'
  label' <- manyTill inline (char '}')
  return $ Link (normalizeSpaces label') (escapeURI url', "")

-- | \includegraphics: the first non-option argument (braces stripped)
-- is the image source; the alt text is always "image".
image :: GenParser Char ParserState Inline
image = try $ do
  ("includegraphics", _, args) <- command
  let args' = filter isArg args -- filter out options
  let (src,tit) = case args' of
                    [] -> ("", "")
                    (x:_) -> (stripFirstAndLast x, "")
  return $ Image [Str "image"] (escapeURI src, tit)
-- | \footnote{...} or \thanks{...}: the single argument (braces
-- stripped) is spliced into the input and parsed as blocks, then the
-- original input is restored.  The (contents:[]) pattern makes the
-- parser fail unless exactly one argument was given.
footnote :: GenParser Char ParserState Inline
footnote = try $ do
  (name, _, (contents:[])) <- command
  if ((name == "footnote") || (name == "thanks"))
     then string ""
     else fail "not a footnote or thanks command"
  let contents' = stripFirstAndLast contents
  -- parse the extracted block, which may contain various block elements:
  rest <- getInput
  setInput $ contents'
  blocks <- parseBlocks
  setInput rest
  return $ Note blocks
-- | citations
cite :: GenParser Char ParserState Inline
cite = simpleCite <|> complexNatbibCites

-- | Parse up to two optional bracketed arguments and a braced list of
-- citation keys.  With one bracketed argument it is the suffix; with
-- two, the first is the prefix and the second the suffix.  The prefix
-- and suffix are attached to the first/last citation respectively.
simpleCiteArgs :: GenParser Char ParserState [Citation]
simpleCiteArgs = try $ do
  first <- optionMaybe $ (char '[') >> manyTill inline (char ']')
  second <- optionMaybe $ (char '[') >> manyTill inline (char ']')
  char '{'
  keys <- many1Till citationLabel (char '}')
  let (pre, suf) = case (first , second ) of
                     (Just s , Nothing) -> ([], s )
                     (Just s , Just t ) -> (s , t )
                     _ -> ([], [])
      conv k = Citation { citationId = k
                        , citationPrefix = []
                        , citationSuffix = []
                        , citationMode = NormalCitation
                        , citationHash = 0
                        , citationNoteNum = 0
                        }
  return $ addPrefix pre $ addSuffix suf $ map conv keys
-- | Parse a natbib/biblatex citation command.  The command name
-- determines the citation mode (normal / suppress-author / in-text)
-- and whether multiple key groups may follow.  Capitalized variants
-- are accepted via 'addUpper'.
simpleCite :: GenParser Char ParserState Inline
simpleCite = try $ do
  char '\\'
  let biblatex = [a ++ "cite" | a <- ["auto", "foot", "paren", "super", ""]]
                 ++ ["footcitetext"]
      normal = ["cite" ++ a ++ b | a <- ["al", ""], b <- ["p", "p*", ""]]
               ++ biblatex
      supress = ["citeyearpar", "citeyear", "autocite*", "cite*", "parencite*"]
      intext = ["textcite"] ++ ["cite" ++ a ++ b | a <- ["al", ""], b <- ["t", "t*"]]
      mintext = ["textcites"]
      mnormal = map (++ "s") biblatex
      cmdend = notFollowedBy (letter <|> char '*')
      capit [] = []
      capit (x:xs) = toUpper x : xs
      addUpper xs = xs ++ map capit xs
      toparser l t = try $ oneOfStrings (addUpper l) >> cmdend >> return t
  (mode, multi) <- toparser normal (NormalCitation, False)
                   <|> toparser supress (SuppressAuthor, False)
                   <|> toparser intext (AuthorInText , False)
                   <|> toparser mnormal (NormalCitation, True )
                   <|> toparser mintext (AuthorInText , True )
  cits <- if multi then
            many1 simpleCiteArgs
          else
            simpleCiteArgs >>= \c -> return [c]
  -- The (c:cs) pattern is safe: each simpleCiteArgs group is built
  -- from many1Till, so the concatenation is non-empty.
  let (c:cs) = concat cits
      cits' = case mode of
                AuthorInText -> c {citationMode = mode} : cs
                _ -> map (\a -> a {citationMode = mode}) (c:cs)
  return $ Cite cits' []
complexNatbibCites :: GenParser Char ParserState Inline
complexNatbibCites = complexNatbibTextual <|> complexNatbibParenthetical

-- | \citeauthor{...} followed by a \citetext group: the author name is
-- discarded and the first citation switched to in-text mode.
complexNatbibTextual :: GenParser Char ParserState Inline
complexNatbibTextual = try $ do
  string "\\citeauthor{"
  manyTill (noneOf "}") (char '}')
  skipSpaces
  Cite (c:cs) _ <- complexNatbibParenthetical
  return $ Cite (c {citationMode = AuthorInText} : cs) []

-- | \citetext{...}: semicolon-separated citations, each with optional
-- surrounding prose that becomes its prefix/suffix.
complexNatbibParenthetical :: GenParser Char ParserState Inline
complexNatbibParenthetical = try $ do
  string "\\citetext{"
  cits <- many1Till parseOne (char '}')
  return $ Cite (concat cits) []
  where
    parseOne = do
      skipSpaces
      pref <- many (notFollowedBy (oneOf "\\}") >> inline)
      (Cite cites _) <- simpleCite
      suff <- many (notFollowedBy (oneOf "\\};") >> inline)
      skipSpaces
      optional $ char ';'
      return $ addPrefix pref $ addSuffix suff $ cites
-- | Prepend inlines to the prefix of the first citation; an empty
-- citation list stays empty.
addPrefix :: [Inline] -> [Citation] -> [Citation]
addPrefix p cs = case cs of
                   (k:ks) -> k {citationPrefix = p ++ citationPrefix k} : ks
                   []     -> []

-- | Append inlines to the suffix of the last citation; an empty
-- citation list stays empty.
addSuffix :: [Inline] -> [Citation] -> [Citation]
addSuffix s ks = case reverse ks of
                   (k:before) -> reverse (k {citationSuffix = citationSuffix k ++ s} : before)
                   []         -> []
-- | One citation key: everything up to a comma or closing brace, with
-- an optional trailing comma consumed and surrounding space stripped.
citationLabel :: GenParser Char ParserState String
citationLabel = do
  key <- many1 (noneOf ",}")
  optional (char ',')
  return (removeLeadingTrailingSpace key)
-- | Parse any LaTeX inline command and return it in a raw TeX inline element.
rawLaTeXInline' :: GenParser Char ParserState Inline
rawLaTeXInline' = do
  notFollowedBy' $ oneOfStrings ["\\begin", "\\end", "\\item", "\\ignore",
                                 "\\section"]
  rawLaTeXInline
-- | Parse any LaTeX command and return it in a raw TeX inline element.
-- With raw parsing off, the arguments of non-ignorable commands are
-- pushed back onto the input so their contents still get parsed.
rawLaTeXInline :: GenParser Char ParserState Inline
rawLaTeXInline = try $ do
  state <- getState
  if stateParseRaw state
     then command >>= demacro
     else do
       (name,st,args) <- command
       x <- demacro (name,st,args)
       unless (x == Str "" || name `elem` commandsToIgnore) $ do
         inp <- getInput
         setInput $ intercalate " " args ++ inp
       return $ Str ""
| sol/pandoc | src/Text/Pandoc/Readers/LaTeX.hs | gpl-2.0 | 33,352 | 0 | 18 | 8,835 | 10,551 | 5,304 | 5,247 | 788 | 4 |
{- $Id: Command.hs,v 1.2 2003/11/10 21:28:58 antony Exp $
*****************************************************************************
* I N V A D E R S *
* *
* Module: Command *
* Purpose: The Invader command type. *
* Author: Henrik Nilsson *
* *
* Copyright (c) Yale University, 2003 *
* *
*****************************************************************************
-}
module Command where
-- | Top-level commands the game reacts to.
data Command =
      CmdQuit    -- Quit Invaders.
    | CmdNewGame -- Play game.
    | CmdFreeze  -- Freeze game.
    | CmdResume  -- Resume game.
-- | CmdUp -- Move Up.
-- | CmdDown -- Move Down.
-- | CmdLeft -- Move Left.
-- | CmdRight -- Move Right.
| pushkinma/frag | src/Command.hs | gpl-2.0 | 1,158 | 0 | 5 | 632 | 29 | 22 | 7 | 6 | 0 |
module Amoeba.GameLogic.Data.World where
import qualified Control.Lens as L
import qualified Data.Map as M
import Prelude hiding (null, lookup)
import Amoeba.GameLogic.Data.Object
import Amoeba.Middleware.Math.Geometry
-- | Placeholder effect type (carries no data yet).
data Effect = Effect
    deriving (Show, Read, Eq)

type Effects = [Effect]
-- | Effects currently attached to each object, by object id.
type EffectMap = M.Map ObjectId Effects
-- | Objects indexed by their position on the board.
type WorldMap = M.Map Point Object
-- | The whole game world: objects by position, per-object effects,
-- board dimensions, and an optional object used for empty cells.
data World = World { worldMap :: WorldMap
                   , worldEffects :: EffectMap
                   , width :: Int
                   , height :: Int
                   , defaultCell :: Maybe Object
                   }
    deriving (Show, Read, Eq)
-- | A world with no objects, no effects, zero dimensions and no
-- default cell.
emptyWorld :: World
emptyWorld = World M.empty M.empty 0 0 Nothing

-- | Insert (or overwrite) the object at the given point of the world
-- map.  Uses a record update instead of destructuring all five fields.
insertObject :: Point -> Object -> World -> World
insertObject point object w = w { worldMap = M.insert point object (worldMap w) }

-- | Number of occupied cells in the world map.
worldMapSize :: World -> Int
worldMapSize = M.size . worldMap
| graninas/The-Amoeba-World | src/Amoeba/GameLogic/Data/World.hs | gpl-3.0 | 815 | 0 | 9 | 222 | 257 | 149 | 108 | 20 | 1 |
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-- | HTML publishing module for the Anorak system.
module Anorak.Publisher (publishLeagues) where
import Anorak.Config
import Anorak.Results
import Anorak.RLTParser
import Anorak.Sequences
import Anorak.Aggregates
import Anorak.Tables
import Anorak.Goals
import Data.ByteString.Char8(ByteString)
import qualified Data.ByteString.Char8 as BS(append, filter, map, unpack, writeFile)
import Data.Char(isSpace, toLower)
import Data.List(foldl', groupBy, nub)
import Data.Map(Map, (!))
import qualified Data.Map as Map(alter, empty, findWithDefault, fromAscList, insert, keys, map, mapKeys, size, toDescList)
import Data.Set(Set)
import qualified Data.Set as Set(toList)
import Data.Time.Calendar(Day)
import System.Directory(createDirectoryIfMissing)
import System.FilePath((</>))
import Text.StringTemplate(getStringTemplate, render, setManyAttrib, STGroup, stShowsToSE)
import Text.StringTemplate.Classes(ToSElem(toSElem), SElem(SM))
import Text.StringTemplate.GenericStandard()
import Util.File(isNewer)
import Util.List(equal, takeAtLeast)
import Util.Maths(percentage, roundUp)
import Util.Tuple(pair)
-- | Expose a league record's fields (plus derived flags for the sign
-- of the goal difference) as a StringTemplate map.
instance ToSElem LeagueRecord where
    toSElem record = SM $ Map.fromAscList [("adjustment", toSElem . adjustmentString $ adjustment record),
                                           ("against", toSElem $ against record),
                                           ("average", stShowsToSE $ pointsPerGame record),
                                           ("drawn", toSElem $ drawn record),
                                           ("for", toSElem $ for record),
                                           ("goalDiff", toSElem $ goalDiff record),
                                           ("lost", toSElem $ lost record),
                                           ("negativeGD", toSElem $ goalDiff record < 0), -- Goal difference could be neither +ve or -ve (i.e. zero).
                                           ("played", toSElem $ played record),
                                           ("points", toSElem $ points record),
                                           ("positiveGD", toSElem $ goalDiff record > 0), -- Goal difference could be neither +ve or -ve (i.e. zero).
                                           ("team", toSElem $ team record),
                                           ("won", toSElem $ won record)]

-- | Expose a match result (teams, score, date, scorers grouped per
-- player) as a StringTemplate map.
instance ToSElem Result where
    toSElem result = SM $ Map.fromAscList [("awayGoals", toSElem . reduceScorers $ awayGoals result),
                                           ("awayScore", toSElem $ awayScore result),
                                           ("awayTeam", toSElem $ awayTeam result),
                                           ("date", toSElem $ date result),
                                           ("homeGoals", toSElem . reduceScorers $ homeGoals result),
                                           ("homeScore", toSElem $ homeScore result),
                                           ("homeTeam", toSElem $ homeTeam result)]

-- | Expose a single team's view of a result as a StringTemplate map.
instance ToSElem TeamResult where
    toSElem result = SM $ Map.fromAscList [("conceded", toSElem $ conceded result),
                                           ("day", toSElem $ day result),
                                           ("goals", toSElem . reduceScorers $ goalsFor result),
                                           ("opposition", toSElem $ opposition result),
                                           ("outcome", toSElem $ outcome result),
                                           ("scored", toSElem $ scored result),
                                           ("venue", toSElem $ venue result)]
-- | Existential quantified type for template values so that we can set multiple heterogenous attributes simultanteously.
data AttributeValue = forall a.(ToSElem a) => AV a
instance ToSElem AttributeValue where
    toSElem (AV a) = toSElem a
-- | A page is defined by a (relative) path, a list of attributes and the name of the template that should be used to render it.
-- The constructor selects which template renders the page.
data Page = Page !FilePath ![(String, AttributeValue)]
          | TeamPage !FilePath ![(String, AttributeValue)]
          | MiniLeaguePage !FilePath ![(String, AttributeValue)]
          | OptionalPage FilePath [(String, AttributeValue)] !Bool
-- | Data about the season being published, useful in templates.
data MetaData = MetaData {league :: String,
                          division :: String,
                          season :: String,
                          isAggregated :: Bool,
                          isCollated :: Bool,
                          isArchive :: Bool,
                          isNeutral :: Bool,
                          hasScorers :: Bool,
                          teamLinks :: Map String String,
                          miniLeaguesLink :: Maybe String}
-- | Expose season metadata as a StringTemplate map; note isNeutral is
-- published under the key "neutral".
instance ToSElem MetaData where
    toSElem meta = SM $ Map.fromAscList [("division", toSElem $ division meta),
                                         ("hasScorers", toSElem $ hasScorers meta),
                                         ("isAggregated", toSElem $ isAggregated meta),
                                         ("isArchive", toSElem $ isArchive meta),
                                         ("isCollated", toSElem $ isCollated meta),
                                         ("league", toSElem $ league meta),
                                         ("miniLeaguesLink", toSElem $ miniLeaguesLink meta),
                                         ("neutral", toSElem $ isNeutral meta),
                                         ("season", toSElem $ season meta),
                                         ("teamLinks", SM . Map.map toSElem $ teamLinks meta)]
-- | Render a points adjustment as an explicitly signed string
-- ("+n" / "-n"), or Nothing when there is no adjustment.
adjustmentString :: Int -> Maybe String
adjustmentString adj
    | adj == 0  = Nothing
    | adj > 0   = Just ('+' : show adj)
    | otherwise = Just (show adj) -- show already includes the minus sign
-- | If a player has scored more than one goal in the game, combine those goals into a single entry.
-- Returns a list of pairs, first item is the player's name, second is a string containing details of their goals.
reduceScorers :: [Goal] -> [(ByteString, String)]
reduceScorers goalsList = map (pair id (reducedMap!)) players
    where reducedMap = foldl' addGoalToScorers Map.empty goalsList
          players = nub $ map scorer goalsList

-- | Fold one goal into the per-scorer description map, appending
-- ", <type><minute>" when the scorer already has an entry.
addGoalToScorers :: Map ByteString String -> Goal -> Map ByteString String
addGoalToScorers players goal = Map.alter (updateScorer goal) (scorer goal) players
    where updateScorer g Nothing = Just $ goalTypeString g ++ show (minute g)
          updateScorer g (Just d) = Just $ d ++ ", " ++ goalTypeString g ++ show (minute g)

-- | Prefix for penalties ("p") and own goals ("o"); empty otherwise.
goalTypeString :: Goal -> String
goalTypeString goal = case goalType goal of
                        "p" -> "pen "
                        "o" -> "o.g. "
                        _ -> ""
-- | Generates home, away and overall HTML league tables.
-- | Generates home, away, overall (and, with scorer data, first/second
-- half) league tables.  Adjustments and the split apply only to the
-- overall table; home/away pages are skipped at neutral venues.
leagueTablePages :: Results -> MetaData -> LeagueData -> [Page]
leagueTablePages res meta lgData = [Page "index.html" (tableAttrib byTeam (adjustments lgData) (split lgData)),
                                    OptionalPage "hometable.html" (tableAttrib homeOnly Map.empty 0) (not $ isNeutral meta),
                                    OptionalPage "awaytable.html" (tableAttrib awayOnly Map.empty 0) (not $ isNeutral meta),
                                    OptionalPage "firsthalftable.html" (tableAttrib firstHalf Map.empty 0) (hasScorers meta),
                                    OptionalPage "secondhalftable.html" (tableAttrib secondHalf Map.empty 0) (hasScorers meta)]
    where tableAttrib rf adj s = [("table", AV $ leagueTable (rf res) adj s),
                                  ("tableSelected", AV True),
                                  ("zones", AV $ zones lgData),
                                  ("metaData", AV meta)]

-- | Form tables (last 6 overall, last 4 home/away); only generated for
-- a season still in progress.
formTablePages :: Results -> MetaData -> [Page]
formTablePages res meta = [OptionalPage "formtable.html" (formAttrib byTeam 6) (not $ isArchive meta),
                           OptionalPage "homeformtable.html" (formAttrib homeOnly 4) (not $ isArchive meta && not (isNeutral meta)),
                           OptionalPage "awayformtable.html" (formAttrib awayOnly 4) (not $ isArchive meta && not (isNeutral meta))]
    where formAttrib rf n = [("table", AV $ formTable (rf res) n), ("formSelected", AV True), ("metaData", AV meta)]
-- | Generates current and longest sequences for home, away and all matches.
-- | Generates current and longest sequences for home, away and all matches.
-- Current-sequence pages are omitted for archived seasons; home/away
-- pages are omitted at neutral venues.
sequencePages :: Results -> MetaData -> [Page]
sequencePages res meta = [OptionalPage "currentsequences.html" (seqAttribs overallCurrent "currentSequencesSelected") (not $ isArchive meta),
                          Page "longestsequences.html" (seqAttribs overallLongest "longestSequencesSelected"),
                          OptionalPage "homecurrentsequences.html" (seqAttribs homeCurrent "currentSequencesSelected") (not $ isArchive meta && not (isNeutral meta)),
                          OptionalPage "homelongestsequences.html" (seqAttribs homeLongest "longestSequencesSelected") (not $ isNeutral meta),
                          OptionalPage "awaycurrentsequences.html" (seqAttribs awayCurrent "currentSequencesSelected") (not $ isArchive meta && not (isNeutral meta)),
                          OptionalPage "awaylongestsequences.html" (seqAttribs awayLongest "longestSequencesSelected") (not $ isNeutral meta)]
    where (overallCurrent, overallLongest) = getSequenceTables $ byTeam res
          (homeCurrent, homeLongest) = getSequenceTables $ homeOnly res
          (awayCurrent, awayLongest) = getSequenceTables $ awayOnly res
          seqAttribs seqs sel = [("sequences", AV . Map.mapKeys show $ seqs), (sel, AV True), ("metaData", AV meta)]
-- | Generates team aggregates for all matches.
-- | Generates team aggregates for all matches.
aggregatePages:: Results -> MetaData -> [Page]
aggregatePages res meta = [Page "aggregates.html" [("aggregates", AV . Map.mapKeys show . getAggregateTables $ byTeam res),
                                                   ("aggregatesSelected", AV True),
                                                   ("metaData", AV meta)]]
-- | Generates the single results page: the full results list (newest date
--   first, via 'Map.toDescList') plus summary statistics (win/draw counts and
--   percentages, goal totals, biggest wins and highest aggregates).
resultsPages :: Results -> MetaData -> [Page]
resultsPages res meta = [Page "results.html" attributes]
    where homeWinMatches = homeWins $ list res
          awayWinMatches = awayWins $ list res
          matchCount = length $ list res
          homeWinCount = length homeWinMatches
          awayWinCount = length awayWinMatches
          -- Draws are whatever remains once home and away wins are accounted for.
          drawCount = matchCount - homeWinCount - awayWinCount
          goalCount = sum . map aggregate $ list res
          highAggregates = highestAggregates (list res)
          attributes = [("results", AV . Map.toDescList $ byDate res),
                        ("matches", AV matchCount),
                        ("homeWins", AV homeWinCount),
                        ("homeWinPercent", AV $ percentage homeWinCount matchCount),
                        ("awayWins", AV awayWinCount),
                        ("awayWinPercent", AV $ percentage awayWinCount matchCount),
                        ("draws", AV drawCount),
                        ("drawPercent", AV $ percentage drawCount matchCount),
                        ("goals", AV goalCount),
                        ("bigHomeWins", AV $ biggestWins homeWinMatches),
                        ("bigAwayWins", AV $ biggestWins awayWinMatches),
                        ("bigWins", AV . biggestWins $ list res),
                        ("highAggregates", AV highAggregates),
                        ("resultsSelected", AV True),
                        ("metaData", AV meta)]
-- | Generates one page per configured mini-league.  All pages share the same
--   tab list so each page can link to its siblings.
miniLeaguePages :: Results -> MetaData -> LeagueData -> [Page]
miniLeaguePages res meta lgData = map (miniLeaguePage res meta (aliases lgData) tabs) mLeagues
    where mLeagues = miniLeagues lgData
          -- NOTE(review): 'pair id toHTMLFileName' presumably builds
          -- (name, fileName) from each league's name — confirm 'pair' semantics.
          tabs = map (pair id toHTMLFileName . fst) mLeagues -- Each tab is a display name and a file name.
-- | Generates a single mini-league page: its table restricted to the member
--   teams, plus the tab list with the current league's tab flagged as selected.
miniLeaguePage :: Results -> MetaData -> Map ByteString Team -> [(ByteString, String)] -> (ByteString, Set Team) -> Page
miniLeaguePage res meta aliasMap tabs (name, members) = MiniLeaguePage (toHTMLFileName name) attributes
    where selectedTabs = map (\(n, f) -> (n, f, n == name)) tabs -- Add a boolean "selected" flag to each tab.
          attributes = [("table", AV $ miniLeagueTable members (byTeam res) aliasMap),
                        ("miniLeaguesSelected", AV True),
                        ("name", AV name),
                        ("bottomTabs", AV selectedTabs),
                        ("metaData", AV meta)]
-- | Generates one overview page per team appearing in the results.
--   'positions ! t' is a partial lookup: it assumes 'leaguePositions' returns
--   an entry for every key of 'byTeam res' — TODO confirm.
teamPages :: Results -> MetaData -> LeagueData -> [Page]
teamPages res meta lgData = map (\t -> teamPage t res (positions ! t) meta) . Map.keys $ byTeam res
    where positions = leaguePositions (teams lgData) (byDate res) (adjustments lgData) (split lgData)
-- | Generate the overview page for an individual team: results, home/away/overall
--   records, goal-scorers, league positions over time and goals-by-interval data.
--   NOTE(review): the '(!)' lookups assume every team key exists in the
--   home/away/byTeam maps and in 'teamLinks meta'; 'maximum' assumes the
--   interval lists are non-empty (i.e. the team played at least one match) —
--   confirm these invariants hold for all inputs.
teamPage :: Team -> Results -> [(Day, Int)] -> MetaData -> Page
teamPage t res positions meta = TeamPage (teamLinks meta ! BS.unpack t) attributes
    where homeResults = homeOnly res ! t
          awayResults = awayOnly res ! t
          teamResults = map (convertResult t) $ byTeam res ! t
          (goalScorers, ownGoals) = teamGoalScorers teamResults
          (goalsForByInterval, goalsAgainstByInterval) = goalsByInterval teamResults
          -- Don't include all goal-scorers for aggregated pages because the list could be massive.
          goalScorers' = if isAggregated meta then takeAtLeast 10 $ groupBy (equal snd) goalScorers else goalScorers
          attributes = [("team", AV t),
                        ("results", AV teamResults),
                        ("record", AV . getSummary t $ byTeam res ! t),
                        ("homeRecord", AV $ getSummary t homeResults),
                        ("awayRecord", AV $ getSummary t awayResults),
                        ("scorers", AV goalScorers'),
                        ("ownGoals", AV $ show ownGoals),
                        ("positions", AV positions),
                        ("goalsByInterval", AV (goalsForByInterval, goalsAgainstByInterval)),
                        ("intervalMaxima", AV (roundUp (maximum goalsForByInterval) 5, roundUp (maximum goalsAgainstByInterval) 5)),
                        ("teamCount", AV . Map.size $ teamLinks meta),
                        ("metaData", AV meta)]
-- | Summarise a team's record as (won, won%, drawn, drawn%, lost, lost%),
--   with each percentage taken over the number of matches played.
getSummary :: Team -> [Result] -> (Int, Float, Int, Float, Int, Float)
getSummary t res = (w, pct w, d, pct d, l, pct l)
    where record = buildRecord t res
          matches = played record
          pct n = percentage n matches
          w = won record
          d = drawn record
          l = lost record
-- | Generates the top scorers list (only if there are scorers in the data).
--   The page is optional: it is suppressed when no goal-scorer information
--   is present in the results.
goalPages :: Results -> MetaData -> [Page]
goalPages res meta = [OptionalPage "goals.html" attributes (not (null players))]
    where matches = list res
          players = topGoalScorers matches
          attributes = [("scorers", AV players),
                        ("penalties", AV (topPenaltyScorers matches)),
                        ("hatTricks", AV (hatTricks matches)),
                        ("goalsSelected", AV True),
                        ("metaData", AV meta)]
-- | Convert a string for use as a filename (converts to lower case and
--   eliminates whitespace), then appends the ".html" extension.
toHTMLFileName :: ByteString -> String
toHTMLFileName name = BS.unpack (baseName `BS.append` BS.pack ".html")
    where baseName = BS.map toLower (BS.filter (not . isSpace) name)
-- | Take a list of team names and return mappings for the associated team pages.
--   Each team name maps to the HTML file name of its page; if the team has an
--   alias it is the alias that determines the file name.
mapTeamNames :: LeagueData -> Map String String
mapTeamNames leagueData = foldl' insert Map.empty names
    where names = Set.toList $ teams leagueData
          -- findWithDefault t t: fall back to the team's own name when no alias exists.
          insert m t = Map.insert (BS.unpack t) (toHTMLFileName . Map.findWithDefault t t $ aliases leagueData) m
-- | Generates all stats pages for a given season by running every page
--   generator against the prepared results (and, where needed, the raw
--   league data).  Uses 'concatMap' instead of @concat $ map …@ and names
--   the shared prepared-results value for clarity.
seasonPages :: LeagueData -> MetaData -> [Page]
seasonPages lgData meta = concatMap args2 [formTablePages, resultsPages, sequencePages, aggregatePages, goalPages]
                          ++ concatMap args3 [leagueTablePages, miniLeaguePages, teamPages]
    where res = prepareResults (results lgData) (aliases lgData)
          args2 f = f res meta           -- generators taking Results and MetaData
          args3 f = f res meta lgData    -- generators additionally taking the LeagueData
-- | Determine which file the "Mini-Leagues" tab should link to (derived from the name of the first mini-league).
-- If there are no mini-leagues then this function returns nothing and the tab should not be shown.
getMiniLeaguesLink :: [(ByteString, Set Team)] -> Maybe String
getMiniLeaguesLink leagues = case leagues of
    []             -> Nothing
    (name, _) : _  -> Just (toHTMLFileName name)
-- | Top-level entry point: writes the league-selector JSON to the output root,
--   then publishes every configured league.
publishLeagues :: STGroup ByteString -> Configuration -> IO ()
publishLeagues templates config = do publishPage templates (outputRoot config) $ Page "selector.json" [("config", AV config)]
                                     mapM_ (publishLeague templates) $ leagues config
-- | Publish every division of a single league.
publishLeague :: STGroup ByteString -> League -> IO ()
publishLeague templates lg = mapM_ (publishDivision templates (leagueName lg)) $ divisions lg
-- | Publish every season of a single division, passing league and division names down.
publishDivision :: STGroup ByteString -> String -> Division -> IO ()
publishDivision templateGroup lgName lgDiv = mapM_ (publishSeason templateGroup lgName (divisionName lgDiv)) $ seasons lgDiv
-- | Publish the pages for a single season.  The season is regenerated when its
--   data file is newer than the generated index.html, or unconditionally for
--   aggregated/collated seasons; otherwise it is skipped.
--
--   Fix: status messages are plain Strings, so they are emitted with
--   'putStrLn' rather than 'print' (which would wrap them in quotes via 'show').
publishSeason :: STGroup ByteString -> String -> String -> Season -> IO ()
publishSeason templates lgName divName divSeason = do
    let dataFile = inputFile divSeason
    modified <- isNewer dataFile (outputDir divSeason </> "index.html")
    if modified || aggregated divSeason || collated divSeason
        then do putStrLn $ "Processing " ++ dataFile
                leagueData <- parseRLTFile dataFile
                let links = mapTeamNames leagueData
                    meta = MetaData lgName
                                    divName
                                    (seasonName divSeason)
                                    (aggregated divSeason)
                                    (collated divSeason)
                                    (archive divSeason)
                                    (neutral divSeason)
                                    (scorers divSeason)
                                    links
                                    (getMiniLeaguesLink $ miniLeagues leagueData)
                createDirectoryIfMissing True (outputDir divSeason)
                mapM_ (publishPage templates (outputDir divSeason)) $ seasonPages leagueData meta
        else putStrLn $ "Skipping unchanged file " ++ dataFile
-- | Publish a single page by applying the appropriate HStringTemplate template.
--   Plain pages use their own name as the template name; team and mini-league
--   pages share the "team.html" / "minileague.html" templates.  The final
--   catch-all clause makes an OptionalPage with a False flag a silent no-op.
publishPage :: STGroup ByteString -> FilePath -> Page -> IO ()
publishPage group dir (Page name attributes) = publish group dir attributes name name
publishPage group dir (TeamPage name attributes) = publish group dir attributes name "team.html"
publishPage group dir (MiniLeaguePage name attributes) = publish group dir attributes name "minileague.html"
publishPage group dir (OptionalPage name attributes True) = publish group dir attributes name name
publishPage _ _ _ = return ()
-- | Render the named template with the given attributes and write the result
--   to @dir </> fileName@.  If the template cannot be found, a diagnostic is
--   printed instead of failing.
--
--   Fix: the diagnostic is a String, so use 'putStrLn' rather than 'print'
--   (which would quote the message via 'show').
publish :: STGroup ByteString -> FilePath -> [(String, AttributeValue)] -> FilePath -> String -> IO ()
publish group dir attributes fileName templateName = case getStringTemplate templateName group of
    Nothing       -> putStrLn $ "Could not find template for " ++ templateName
    Just template -> BS.writeFile (dir </> fileName) . render $ setManyAttrib attributes template
| dwdyer/anorak | src/haskell/Anorak/Publisher.hs | gpl-3.0 | 22,582 | 1 | 17 | 9,172 | 4,962 | 2,650 | 2,312 | 277 | 3 |
main :: IO () | hmemcpy/milewski-ctfp-pdf | src/content/3.5/code/haskell/snippet31.hs | gpl-3.0 | 13 | 0 | 6 | 3 | 11 | 5 | 6 | 1 | 0 |
{-# LANGUAGE DeriveDataTypeable, ScopedTypeVariables #-}
-- Generators in Haskell
--
-- We translate the in-order tree traversal example from an old article
-- Generators in Icon, Python, and Scheme, 2004.
-- http://okmij.org/ftp/Scheme/enumerators-callcc.html#Generators
--
-- using Haskell and delimited continuations rather than call/cc + mutation.
-- The code is shorter, and it even types.
-- To be honest, we actually translate the OCaml code generator.ml
-- This code is the extension of Generator1.hs; we use delimited
-- control not only to implement the generator. We also use delimited
-- control to accumulate the results in a list. We need two different
-- prompts then (with two different answer-types, as it happens).
-- This file illustrates the prompt flavors PP and PM, using newtypes
-- to define private global prompts (global prompts that are private to
-- the current module).
module Generator2 where
import Control.Monad.CC.CCCxe
import Control.Monad.Trans (liftIO, lift)
import Data.Typeable
{-
A sample program Python programmers seem to be proud of: an in-order
traversal of a tree:
>>>> # A recursive generator that generates Tree leaves in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
Given below is the complete implementation in Haskell.
-}
-- A few preliminaries: define the tree and build a sample tree
type Label = Int
-- | A binary tree with labelled internal nodes and unlabelled leaves.
data Tree = Leaf | Node Label Tree Tree deriving Show
-- | Build a complete binary tree of the given depth.  Nodes are labelled in
--   breadth-first order starting at 1: node @n@ has children @2n@ and @2n+1@.
make_full_tree :: Int -> Tree
make_full_tree depth = loop 1 depth
 where
 loop label 0 = Leaf
 loop label n = Node label (loop (2*label) (pred n)) (loop (2*label+1) (pred n))
-- | A depth-3 sample tree used by the tests below.
tree1 = make_full_tree 3
-- In Python, `yield' is a keyword. In Haskell, it is a regular function.
-- Furthermore, it is a user-defined function, in one line of code.
-- To get generators there is no need to extend a language.
-- ------------------------------------------------------------------------
-- First, we try the prompt flavor PP
-- The answer-type for one of the prompts: a continuation transformer wrapping
-- a function from the loop body (a -> CC PP m ()) to a CC computation.
newtype ResP m a = ResP ( (a -> CC PP m ()) -> CC PP m () )
-- Hand-written Typeable1 instance (pre-GHC-7.8 style API: mkTyCon/mkTyConApp).
instance Typeable1 m => Typeable1 (ResP m) where
    typeOf1 x = mkTyConApp (mkTyCon "ResP") [m]
       where m = typeOf1 (undefined:: m ())
-- Unwrap a ResP and apply it to the loop body.
outResP body (ResP f) = f body
-- One prompt, used by the generator (the yield/enumerate pair)
-- We instantiate the global pp to the desired answer-type.
ppy :: (Typeable1 m, Typeable a) => Prompt PP m (ResP m a)
ppy = pp
-- The rest of the code, up to test_io, is the same as that in Generator1.hs
-- | Suspend the generator at prompt 'ppy', handing the yielded value to the
--   enumerator's body before resuming the captured continuation.
yieldP :: (Typeable1 m, Typeable a) => Monad m => a -> CC PP m ()
yieldP v = shift0P ppy (\k -> return . ResP $ \b -> b v >> k () >>= outResP b)
-- The enumerator: the for-loop essentially
-- | Run the iterator under prompt 'ppy' and feed every yielded value to 'body'.
enumerateP :: (Typeable1 m, Typeable a, Monad m) =>
             CC PP m () -> (a -> CC PP m ()) -> CC PP m ()
enumerateP iterator body =
  pushPrompt ppy (iterator >> (return . ResP . const $ return ())) >>=
  outResP body
-- The in_order function itself: compare with the Python version
-- | In-order traversal: yield every node's label, left subtree first,
--   then the node itself, then the right subtree.
in_orderP :: (Typeable1 m, Monad m) => Tree -> CC PP m ()
in_orderP t = case t of
  Leaf                  -> return ()
  Node label left right -> in_orderP left >> yieldP label >> in_orderP right
-- Print out the result of the in-order traversal
-- | Prints each label on its own line via the enumerator's loop body.
test_ioP :: IO ()
test_ioP = runCC $
           enumerateP (in_orderP tree1) (liftIO .(print :: (Int -> IO ())))
-- 4 2 5 1 6 3 7
-- ------------------------------------------------------------------------
-- Using the prompt flavor PM
-- The above code works. We can define the second pair of operators
-- to accummulate the result into a list. Yet, the solution is
-- not very satisfactory. We notice that the prompt type ppy is
-- polymorphic over a, the elements we yield. What ensures that
-- `yieldP' yields elements of the same type that enumerateP can pass to the
-- body of the loop? Nothing, actually, at compile time. If yieldP and
-- enumerateP do not agree on the type of the elements, a run-time
-- error will occur.
-- This is where the PM prompt type comes in handy. It has a phantom
-- type parameter c, which can be used to communicate between
-- producers and consumers of the effect. We use the type parameter c
-- to communicate the type of elements, between yield and enumerate.
-- Since the parameter is phantom, it costs us nothing at run-time.
-- The answer-type for one of the prompts.  Same shape as ResP, but over the
-- PM prompt flavor whose phantom mark carries the element type 'a' so that
-- yield and enumerate are forced to agree on it at compile time.
newtype Res m a = Res ( (a -> CC (PM a) m ()) -> CC (PM a) m () )
-- Hand-written Typeable1 instance (pre-GHC-7.8 style API).
instance Typeable1 m => Typeable1 (Res m) where
    typeOf1 x = mkTyConApp (mkTyCon "Res") [m]
       where m = typeOf1 (undefined:: m ())
-- Unwrap a Res and apply it to the loop body.
outRes body (Res f) = f body
-- One prompt, used by the generator (the yield/enumerate pair)
py :: (Typeable1 m, Typeable a) => Prompt (PM a) m (Res m a)
py = pm
-- The rest of the code, up to test_io, is the same as that in Generator1.hs
-- | Suspend at prompt 'py', handing the yielded value to the enumerator's body.
yield :: (Typeable1 m, Typeable a) => Monad m => a -> CC (PM a) m ()
yield v = shift0P py (\k -> return . Res $ \b -> b v >> k () >>= outRes b)
-- The enumerator: the for-loop essentially
-- | Run the iterator under prompt 'py' and feed every yielded value to 'body'.
enumerate :: (Typeable1 m, Typeable a, Monad m) =>
             CC (PM a) m () -> (a -> CC (PM a) m ()) -> CC (PM a) m ()
enumerate iterator body =
  pushPrompt py (iterator >> (return . Res . const $ return ())) >>=
  outRes body
-- The in_order function itself: compare with the Python version
-- | In-order traversal: yield every node's label, left subtree first,
--   then the node itself, then the right subtree.
in_order :: (Typeable1 m, Monad m) => Tree -> CC (PM Label) m ()
in_order t = case t of
  Leaf                  -> return ()
  Node label left right -> in_order left >> yield label >> in_order right
-- Print out the result of the in-order traversal
-- | Prints each label on its own line via the enumerator's loop body.
test_io :: IO ()
test_io = runCC $ enumerate (in_order tree1) (liftIO .(print :: (Int -> IO ())))
-- 4 2 5 1 6 3 7
-- The second application of control: accumulating the results in a list
-- The answer-type for the second prompt. We use newtype for identification
newtype Acc a = Acc [a] deriving Typeable
-- Prepend a value onto the accumulated list (results end up in yield order).
toAcc v (Acc l) = return . Acc $ v:l
-- The second prompt, used by the acc/accumulated pair
-- Again we use the mark of PM to communicate the type of the elements
-- between `acc' and `accumulated'. It happens to be the same type used
-- by yield/enumetrate.
-- If that was not the case, we could have easily arranged for a type-level
-- record (see HList or the TFP paper).
-- | The accumulator prompt; its PM mark fixes the element type shared by
--   'acc' and 'accumulated'.
pa :: (Typeable a) => Prompt (PM a) m (Acc a)
pa = pm
-- | Record one value: resume the continuation first, then cons the value onto
--   the accumulator it returns.
acc :: (Typeable a, Monad m) => a -> CC (PM a) m ()
acc v = shift0P pa (\k -> k () >>= toAcc v)
-- | Run a computation under the accumulator prompt, starting from the empty
--   list, and return everything collected by 'acc'.
accumulated :: (Typeable a, Monad m) => CC (PM a) m () -> CC (PM a) m [a]
accumulated body =
  pushPrompt pa (body >> return (Acc [])) >>= \ (Acc l) -> return l
-- | Pure accumulation test: collect the in-order labels of tree1 into a list.
test_acc :: [Label]
test_acc = runIdentity . runCC . accumulated $
             (enumerate (in_order tree1) acc)
-- [4,2,5,1,6,3,7]
-- To avoid importing mtl, we define Identity on our own
-- NOTE(review): pre-AMP code — on GHC >= 7.10 a Monad instance also requires
-- Functor and Applicative instances; this compiles only on older GHCs,
-- consistent with the Typeable1/mkTyCon usage above.
newtype Identity a = Identity{runIdentity :: a} deriving (Typeable)
instance Monad Identity where
    return = Identity
    m >>= f = f $ runIdentity m
| nishiuramakoto/logiku | my-CC-delcont-cxe-0.1.0.2/Generator2.hs | gpl-3.0 | 7,002 | 10 | 13 | 1,499 | 1,654 | 874 | 780 | 73 | 2 |
module NeuroNet where
import qualified Data.Matrix as M
-- All vectors/matrices are Data.Matrix matrices of Doubles; the aliases
-- document intent (inputs and biases are column vectors built by createInput).
type DMatrix = M.Matrix Double
type Weights = DMatrix
type Bias = DMatrix
type Input = DMatrix
type Output = DMatrix
-- | One network layer: a weight matrix plus a bias vector.
data Level = Level {weights::Weights, bias::Bias}
-- | A network is an ordered list of layers.
type Net = [Level]
-- | Record of one layer's evaluation: its input, its activated output, and
--   the weighted input (pre-activation value).
data LevelRun = LevelRun {input::Input, output::Output, wInput::Input}
-- | One LevelRun per layer, in network order.
type Run = [LevelRun]
-- | Convert a list of doubles to an input vector (an n-by-1 column matrix).
createInput :: [Double] -> Input
createInput list = M.fromList (length list) 1 list
-- | Convert a list of doubles to a bias vector (same column-vector shape as an input).
createBias :: [Double] -> Bias
createBias = createInput
-- | Convert a list of doubles to the rows-by-cols system matrix of a neuron
--   level; elements fill the matrix via 'M.fromList'.  Eta-reduced: this is
--   exactly 'M.fromList' specialised to the Weights alias.
createWeights :: Int -> Int -> [Double] -> Weights
createWeights = M.fromList
-- | The sigmoid activation function, a standard activation function defined
-- on the range (0, 1): 1 / (1 + e^(-t)).
sigmoid :: (Floating a) => a -> a
sigmoid t = recip (1 + exp (negate t))
-- | The derivative of the sigmoid function conveniently can be computed in
-- terms of the sigmoid function itself: s(t) * (1 - s(t)).
sigmoid' :: (Floating a) => a -> a
sigmoid' t = let s = sigmoid t in s * (1 - s)
-- | The sigmoid activation function applied element-wise to a whole output layer.
sigmoidO :: Output -> Output
sigmoidO out = fmap sigmoid out
-- | The derivative of sigmoid activation function applied element-wise to a whole output layer.
sigmoidO' :: Output -> Output
sigmoidO' out = fmap sigmoid' out
-- | Apply a layer and the activation function to an input; gives an output.
--   Composition of the affine step ('bareApplyLevel') and the activation.
applyLevel :: Level -> Input -> Output
applyLevel l = sigmoidO . bareApplyLevel l
-- | The affine step of a layer: multiply the input by the weight matrix and
--   add the bias vector element-wise (no activation applied).
bareApplyLevel :: Level -> Input -> Output
bareApplyLevel level x = M.elementwise (+) weighted (bias level)
    where weighted = M.multStd2 (weights level) x
-- | Apply a network to an input, layer by layer, recording each layer's
--   input, weighted input and activated output.  Each layer's output feeds
--   the next layer.
--
--   Idiom fix: builds the result with (:) instead of @[x] ++ rest@.
runNet :: Net -> Input -> Run
runNet [] _ = []
runNet (level:levels) input = LevelRun {input=input, output=output, wInput=wInput} : runNet levels output
    where wInput = bareApplyLevel level input
          output = sigmoidO wInput
| brachiel/neuronet | src/NeuroNet.hs | gpl-3.0 | 2,186 | 0 | 11 | 529 | 547 | 307 | 240 | 37 | 1 |
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE TupleSections #-}
-- FIXME: for functions prefixedShowDot
{-# LANGUAGE FlexibleContexts #-}
{- |
Module : Web.Theory
Description : Pretty-printing security protocol theories into HTML code.
Copyright : (c) 2011, 2012 Simon Meier & Cedric Staub
License : GPL-3
Maintainer : Simon Meier <iridcode@gmail.com>
Stability : experimental
Portability : non-portable
-}
module Web.Theory
( htmlThyPath
, htmlDiffThyPath
-- , htmlThyDbgPath
, imgThyPath
, imgDiffThyPath
, titleThyPath
, titleDiffThyPath
, theoryIndex
, diffTheoryIndex
, nextThyPath
, nextDiffThyPath
, prevThyPath
, prevDiffThyPath
, nextSmartThyPath
, nextSmartDiffThyPath
, prevSmartThyPath
, prevSmartDiffThyPath
, applyMethodAtPath
, applyMethodAtPathDiff
, applyDiffMethodAtPath
, applyProverAtPath
, applyDiffProverAtPath
, applyProverAtPathDiff
)
where
import Debug.Trace (trace)
import Data.Char (toUpper)
import Data.List
import qualified Data.Map as M
import Data.Maybe
import Data.Monoid
import qualified Data.Set as S
import qualified Data.Text as T
import Data.Time.Format (formatTime)
import Control.Basics
import Control.Concurrent (threadDelay)
import System.Directory
import System.FilePath
-- #if MIN_VERSION_time(1,5,0)
import Data.Time.Format (defaultTimeLocale)
-- #else
-- For GHC 7.10 comment line below and uncoment line 2 above
-- import System.Locale (defaultTimeLocale)
-- #endif
import Extension.Data.Label
import Text.Blaze.Html (preEscapedToMarkup, toHtml)
import qualified Text.Dot as D
import Text.Hamlet (Html, hamlet)
import Text.PrettyPrint.Html
import Utils.Misc (stringSHA256)
import System.Exit
import System.Process
import Logic.Connectives
import Theory
import Theory.Constraint.System.Dot (nonEmptyGraph,nonEmptyGraphDiff)
import Theory.Text.Pretty
import Theory.Tools.Wellformedness
import Web.Settings
import Web.Types
------------------------------------------------------------------------------
-- Various other functions
------------------------------------------------------------------------------
-- | Apply the i-th ranked proof method (1-based) at the given path inside the
--   named lemma's proof.  Returns Nothing when the lemma or path does not
--   exist, the node carries no annotated constraint system, or i is out of
--   range.  After the step, remaining sorries are auto-closed where possible.
applyMethodAtPath :: ClosedTheory -> String -> ProofPath
                  -> Heuristic -- ^ How to extract/order the proof methods.
                  -> Int       -- What proof method to use.
                  -> Maybe ClosedTheory
applyMethodAtPath thy lemmaName proofPath heuristic i = do
    lemma <- lookupLemma lemmaName thy
    subProof <- get lProof lemma `atPath` proofPath
    let ctxt = getProofContext lemma thy
        sys = psInfo (root subProof)
        ranking = useHeuristic heuristic (length proofPath)
    methods <- (map fst . rankProofMethods ranking ctxt) <$> sys
    -- Guard BOTH ends of the range: i is 1-based, so without the 'i >= 1'
    -- check 'methods !! (i-1)' would crash for i <= 0.
    method <- if i >= 1 && length methods >= i then Just (methods !! (i-1)) else Nothing
    applyProverAtPath thy lemmaName proofPath
      (oneStepProver method `mappend`
       replaceSorryProver (oneStepProver Simplify) `mappend`
       replaceSorryProver (contradictionProver) `mappend`
       replaceSorryProver (oneStepProver Solved)
      )
-- | As 'applyMethodAtPath', but for a side-local lemma of a diff theory.
--   Returns Nothing for a missing lemma/path, an unannotated node, or an
--   out-of-range method index.
applyMethodAtPathDiff :: ClosedDiffTheory -> Side -> String -> ProofPath
                      -> Heuristic -- ^ How to extract/order the proof methods.
                      -> Int       -- What proof method to use.
                      -> Maybe ClosedDiffTheory
applyMethodAtPathDiff thy s lemmaName proofPath heuristic i = do
    lemma <- lookupLemmaDiff s lemmaName thy
    subProof <- get lProof lemma `atPath` proofPath
    let ctxt = getProofContextDiff s lemma thy
        sys = psInfo (root subProof)
        ranking = useHeuristic heuristic (length proofPath)
    methods <- (map fst . rankProofMethods ranking ctxt) <$> sys
    -- i is 1-based; guard i >= 1 so 'methods !! (i-1)' cannot crash for i <= 0.
    method <- if i >= 1 && length methods >= i then Just (methods !! (i-1)) else Nothing
    applyProverAtPathDiff thy s lemmaName proofPath
      (oneStepProver method `mappend`
       replaceSorryProver (oneStepProver Simplify) `mappend`
       replaceSorryProver (contradictionProver) `mappend`
       replaceSorryProver (oneStepProver Solved)
      )
-- | As 'applyMethodAtPath', but for a diff lemma (equivalence proof).
--   Returns Nothing for a missing lemma/path, an unannotated node, or an
--   out-of-range method index.
applyDiffMethodAtPath :: ClosedDiffTheory -> String -> ProofPath
                      -> Heuristic -- ^ How to extract/order the proof methods.
                      -> Int       -- What proof method to use.
                      -> Maybe ClosedDiffTheory
applyDiffMethodAtPath thy lemmaName proofPath heuristic i = do
    lemma <- lookupDiffLemma lemmaName thy
    subProof <- get lDiffProof lemma `atPathDiff` proofPath
    let ctxt = getDiffProofContext lemma thy
        sys = dpsInfo (root subProof)
        ranking = useHeuristic heuristic (length proofPath)
    methods <- (map fst . rankDiffProofMethods ranking ctxt) <$> sys
    -- i is 1-based; guard i >= 1 so 'methods !! (i-1)' cannot crash for i <= 0.
    method <- if i >= 1 && length methods >= i then Just (methods !! (i-1)) else Nothing
    applyDiffProverAtPath thy lemmaName proofPath
      (oneStepDiffProver method `mappend`
       replaceDiffSorryProver (oneStepDiffProver (DiffBackwardSearchStep Simplify)) `mappend`
       replaceDiffSorryProver (contradictionDiffProver) `mappend`
       replaceDiffSorryProver (oneStepDiffProver DiffMirrored)
      )
-- | Run a prover on the sub-proof at the given path inside the named lemma's proof.
applyProverAtPath :: ClosedTheory -> String -> ProofPath
                  -> Prover -> Maybe ClosedTheory
applyProverAtPath thy lemmaName proofPath prover =
    modifyLemmaProof (focus proofPath prover) lemmaName thy
-- | Run a prover on the sub-proof at the given path of a side-local lemma in a diff theory.
applyProverAtPathDiff :: ClosedDiffTheory -> Side -> String -> ProofPath
                  -> Prover -> Maybe ClosedDiffTheory
applyProverAtPathDiff thy s lemmaName proofPath prover =
    modifyLemmaProofDiff s (focus proofPath prover) lemmaName thy
-- | Run a diff prover on the sub-proof at the given path inside the named diff lemma.
applyDiffProverAtPath :: ClosedDiffTheory -> String -> ProofPath
                  -> DiffProver -> Maybe ClosedDiffTheory
applyDiffProverAtPath thy lemmaName proofPath prover =
    -- error (show thy ++ "<br> " ++ show lemmaName ++ "<br> " ++ show proofPath ++ "<br> "{- ++ show prover-})
    modifyDiffLemmaProof (focusDiff proofPath prover) lemmaName thy
------------------------------------------------------------------------------
-- Pretty printing
------------------------------------------------------------------------------
-- | Reference a dot graph for the given path: an <img> element whose source
--   is the rendered URL of the theory-graph route for that path.
refDotPath :: HtmlDocument d => RenderUrl -> TheoryIdx -> TheoryPath -> d
refDotPath renderUrl tidx path =
    closedTag "img" [("class", "graph"), ("src", src)]
  where
    src = T.unpack (renderUrl (TheoryGraphR tidx path))
-- | Reference a dot graph for the given diff path: an <img> element pointing
--   either at the mirror-graph route or the ordinary diff-graph route.
refDotDiffPath :: HtmlDocument d => RenderUrl -> TheoryIdx -> DiffTheoryPath -> Bool -> d
refDotDiffPath renderUrl tidx path mirror =
    closedTag "img" [("class", "graph"), ("src", src)]
  where
    route | mirror    = TheoryMirrorDiffR tidx path
          | otherwise = TheoryGraphDiffR tidx path
    src = T.unpack (renderUrl route)
-- | On-disk location of the .dot file for a graph: content-addressed by the
--   SHA-256 of the dot code, so identical graphs map to the same file.
getDotPath :: String -> FilePath
getDotPath = getGraphPath "dot"
-- | On-disk location of a rendered graph with the given extension:
--   imageDir / SHA-256(code) . ext  (content-addressed caching).
getGraphPath :: String -> String -> FilePath
getGraphPath ext code = imageDir </> addExtension (stringSHA256 code) ext
-- | Create a link to a given theory path: wraps the document in an <a> tag
--   whose class list always contains "internal-link" plus any extra classes.
linkToPath :: HtmlDocument d
           => RenderUrl    -- ^ Url rendering function.
           -> Route WebUI  -- ^ Route that should be linked.
           -> [String]     -- ^ Additional class
           -> d            -- ^ Document that carries the link.
           -> d
linkToPath renderUrl route cls =
    withTag "a" [("class", unwords ("internal-link" : cls)), ("href", T.unpack (renderUrl route))]
-- | Output some preformatted text: a <div> with class "preformatted", plus an
--   optional extra class.
preformatted :: HtmlDocument d => Maybe String -> d -> d
preformatted cl = withTag "div" [("class", cls)]
  where
    cls = case cl of
            Just c  -> "preformatted " ++ c
            Nothing -> "preformatted"
-- | Render a proof index relative to a theory path constructor.  Each step's
--   annotation is (Maybe System, Maybe Bool): no system means the step is
--   superfluous; the Bool (when present) marks the step good or bad and picks
--   the highlight class.  Sorry steps get no remove link.
proofIndex :: HtmlDocument d
           => RenderUrl
           -> (ProofPath -> Route WebUI)        -- ^ Relative addressing function
           -> Proof (Maybe System, Maybe Bool)  -- ^ The annotated incremental proof
           -> d
proofIndex renderUrl mkRoute =
    prettyProofWith ppStep ppCase . insertPaths
  where
    ppCase step = markStatus (fst $ psInfo step)
    ppStep step =
           case fst $ psInfo step of
             (Nothing, _)    -> superfluousStep
             (_, Nothing)    -> stepLink ["sorry-step"]
             (_, Just True)  -> stepLink ["hl_good"]
             (_, Just False) -> stepLink ["hl_bad"]
        <> case psMethod step of
             Sorry _ -> emptyDoc
             _       -> removeStep
      where
        ppMethod = prettyProofMethod $ psMethod step
        stepLink cls = linkToPath renderUrl
            (mkRoute . snd . psInfo $ step)
            ("proof-step" : cls) ppMethod
        superfluousStep = withTag "span" [("class","hl_superfluous")] ppMethod
        removeStep = linkToPath renderUrl (mkRoute . snd . psInfo $ step)
            ["remove-step"] emptyDoc
-- | Render a proof index relative to a theory path constructor — the diff-proof
--   analogue of 'proofIndex', using the DiffProof accessors and DiffSorry.
diffProofIndex :: HtmlDocument d
           => RenderUrl
           -> (ProofPath -> Route WebUI)                -- ^ Relative addressing function
           -> DiffProof (Maybe DiffSystem, Maybe Bool)  -- ^ The annotated incremental proof
           -> d
diffProofIndex renderUrl mkRoute =
    prettyDiffProofWith ppStep ppCase . insertPathsDiff
  where
    ppCase step = markStatusDiff (fst $ dpsInfo step)
    ppStep step =
           case fst $ dpsInfo step of
             (Nothing, _)    -> superfluousStep
             (_, Nothing)    -> stepLink ["sorry-step"]
             (_, Just True)  -> stepLink ["hl_good"]
             (_, Just False) -> stepLink ["hl_bad"]
        <> case dpsMethod step of
             DiffSorry _ -> emptyDoc
             _           -> removeStep
      where
        ppMethod = prettyDiffProofMethod $ dpsMethod step
        stepLink cls = linkToPath renderUrl
            (mkRoute . snd . dpsInfo $ step)
            ("proof-step" : cls) ppMethod
        superfluousStep = withTag "span" [("class","hl_superfluous")] ppMethod
        removeStep = linkToPath renderUrl (mkRoute . snd . dpsInfo $ step)
            ["remove-step"] emptyDoc
-- | Render the indexing links for a single lemma: its header (status-coloured
--   keyword, name, trace quantifier and formula) followed by the clickable
--   proof index of its annotated proof.
lemmaIndex :: HtmlDocument d
           => RenderUrl               -- ^ The url rendering function
           -> TheoryIdx               -- ^ The theory index
           -> Lemma IncrementalProof  -- ^ The lemma
           -> d
lemmaIndex renderUrl tidx l =
    ( markStatus (psInfo $ root annPrf) $
        (kwLemma <-> prettyLemmaName l <> colon)
        -- FIXME: Reactivate theory editing.
        -- <->
        -- (linkToPath renderUrl lemmaRoute ["edit-link"] editPng <->
        --  linkToPath renderUrl lemmaRoute ["delete-link"] deletePng)
      $-$
      nest 2 ( sep [ prettyTraceQuantifier $ get lTraceQuantifier l
                   , doubleQuotes $ prettyLNFormula $ get lFormula l
                   ] )
    ) $-$
    proofIndex renderUrl mkRoute annPrf
  where
    -- editPng = png "/static/img/edit.png"
    -- deletePng = png "/static/img/delete.png"
    -- png path = closedTag "img" [("class","icon"),("src",path)]
    -- lemmaRoute = TheoryPathMR tidx (TheoryLemma $ get lName l)
    annPrf = annotateLemmaProof l
    -- Proof-step routes address the step by lemma name and proof path.
    mkRoute proofPath = TheoryPathMR tidx (TheoryProof (get lName l) proofPath)
-- | Render the indexing links for a single lemma belonging to one side of a
--   diff theory; identical to 'lemmaIndex' except that proof-step routes carry
--   the 'Side'.
lemmaIndexDiff :: HtmlDocument d
           => RenderUrl               -- ^ The url rendering function
           -> TheoryIdx               -- ^ The theory index
           -> Side
           -> Lemma IncrementalProof  -- ^ The lemma
           -> d
lemmaIndexDiff renderUrl tidx s l =
--     error (show annPrf)
    ( markStatus (psInfo $ root annPrf) $
        (kwLemma <-> prettyLemmaName l <> colon)
        -- FIXME: Reactivate theory editing.
        -- <->
        -- (linkToPath renderUrl lemmaRoute ["edit-link"] editPng <->
        --  linkToPath renderUrl lemmaRoute ["delete-link"] deletePng)
      $-$
      nest 2 ( sep [ prettyTraceQuantifier $ get lTraceQuantifier l
                   , doubleQuotes $ prettyLNFormula $ get lFormula l
                   ] )
    ) $-$
    proofIndex renderUrl mkRoute annPrf
  where
    -- editPng = png "/static/img/edit.png"
    -- deletePng = png "/static/img/delete.png"
    -- png path = closedTag "img" [("class","icon"),("src",path)]
    -- lemmaRoute = TheoryPathMR tidx (TheoryLemma $ get lName l)
    annPrf = annotateLemmaProof l
    mkRoute proofPath = TheoryPathDiffMR tidx (DiffTheoryProof s (get lName l) proofPath)
-- | Render the indexing links for a single diff lemma: a status-coloured
--   header (no formula — diff lemmas state observational equivalence) followed
--   by the clickable diff-proof index.
diffLemmaIndex :: HtmlDocument d
           => RenderUrl                       -- ^ The url rendering function
           -> TheoryIdx                       -- ^ The theory index
           -> DiffLemma IncrementalDiffProof  -- ^ The lemma
           -> d
diffLemmaIndex renderUrl tidx l =
--     error (show annPrf)
    ( markStatusDiff (dpsInfo $ root annPrf) $
        (kwLemma <-> prettyDiffLemmaName l {-<> text (show annPrf)-} <> colon)
        -- FIXME: Reactivate theory editing.
        -- <->
        -- (linkToPath renderUrl lemmaRoute ["edit-link"] editPng <->
        --  linkToPath renderUrl lemmaRoute ["delete-link"] deletePng)
--       $-$
--       nest 2 ( sep [ prettyTraceQuantifier $ get lTraceQuantifier l
--                    , doubleQuotes $ prettyLNFormula $ get lFormula l
--                    ] )
    ) $-$
    diffProofIndex renderUrl mkRoute annPrf
  where
    -- editPng = png "/static/img/edit.png"
    -- deletePng = png "/static/img/delete.png"
    -- png path = closedTag "img" [("class","icon"),("src",path)]
    -- lemmaRoute = TheoryPathMR tidx (TheoryLemma $ get lName l)
    annPrf = annotateDiffLemmaProof l
    mkRoute proofPath = TheoryPathDiffMR tidx (DiffTheoryDiffProof (get lDiffName l) proofPath)
-- | Render the theory index: the sidebar listing the theory name, links to the
--   message theory, rewriting rules, raw/refined sources (with case and
--   open-chain counts) and every lemma with its inline proof index.
theoryIndex :: HtmlDocument d => RenderUrl -> TheoryIdx -> ClosedTheory -> d
theoryIndex renderUrl tidx thy = foldr1 ($-$)
    [ kwTheoryHeader
        $ linkToPath renderUrl (TheoryPathMR tidx TheoryHelp) ["help"]
        $ text $ get thyName thy
    , text ""
    , messageLink
    , text ""
    , ruleLink
    , text ""
    , reqCasesLink "Raw sources" RawSource
    , text ""
    , reqCasesLink "Refined sources " RefinedSource
    , text ""
    , vcat $ intersperse (text "") lemmas
    , text ""
    , kwEnd
    ]
  where
    lemmaIndex' lemma = lemmaIndex renderUrl tidx lemma
    lemmas = map lemmaIndex' (getLemmas thy)
    rules = getClassifiedRules thy
    rulesInfo = parens $ int $ length $ get crProtocol rules
    -- Per-source-kind summary: number of cases plus how many partial
    -- deconstructions (unsolved chains) remain.
    casesInfo kind =
        parens $ nCases <> comma <-> text chainInfo
      where
        cases = getSource kind thy
        nChains = sum $ map (sum . unsolvedChainConstraints) cases
        nCases = int (length cases) <-> text "cases"
        chainInfo | nChains == 0 = "deconstructions complete"
                  | otherwise    = show nChains ++ " partial deconstructions left"
    bold = withTag "strong" [] . text
    overview n info p = linkToPath renderUrl (TheoryPathMR tidx p) [] (bold n <-> info)
    messageLink = overview "Message theory" (text "") TheoryMessage
    ruleLink = overview ruleLinkMsg rulesInfo TheoryRules
    ruleLinkMsg = "Multiset rewriting rules" ++
                  if null(theoryRestrictions thy) then "" else " and restrictions"
    reqCasesLink name k = overview name (casesInfo k) (TheorySource k 0 0)
-- | Render the theory index for a diff theory: like 'theoryIndex' but every
--   message-theory / rules / sources entry appears once per side (LHS, RHS)
--   and once per diff flag, followed by the LHS lemmas, RHS lemmas and the
--   diff lemmas.
diffTheoryIndex :: HtmlDocument d => RenderUrl -> TheoryIdx -> ClosedDiffTheory -> d
diffTheoryIndex renderUrl tidx thy = foldr1 ($-$)
    [ kwTheoryHeader
        $ linkToPath renderUrl (TheoryPathDiffMR tidx DiffTheoryHelp) ["help"]
        $ text $ get diffThyName thy
    , text ""
    , diffRules
    , text ""
    , messageLink LHS False
    , text ""
    , messageLink RHS False
    , text ""
    , messageLink LHS True
    , text ""
    , messageLink RHS True
    , text ""
    , ruleLink LHS False
    , text ""
    , ruleLink RHS False
    , text ""
    , ruleLink LHS True
    , text ""
    , ruleLink RHS True
    , text ""
    , reqCasesLink LHS "LHS: Raw sources " RawSource False
    , text ""
    , reqCasesLink RHS "RHS: Raw sources " RawSource False
    , text ""
    , reqCasesLink LHS "LHS: Raw sources [Diff] " RawSource True
    , text ""
    , reqCasesLink RHS "RHS: Raw sources [Diff] " RawSource True
    , text ""
    , reqCasesLink LHS "LHS: Refined sources " RefinedSource False
    , text ""
    , reqCasesLink RHS "RHS: Refined sources " RefinedSource False
    , text ""
    , reqCasesLink LHS "LHS: Refined sources [Diff] " RefinedSource True
    , text ""
    , reqCasesLink RHS "RHS: Refined sources [Diff] " RefinedSource True
    , text ""
    , bold "LHS: Lemmas"
    , text ""
    , vcat $ intersperse (text "") (lemmas LHS)
    , text ""
    , bold "RHS: Lemmas"
    , text ""
    , vcat $ intersperse (text "") (lemmas RHS)
    , text ""
    , bold "Diff-Lemmas"
    , text ""
    , vcat $ intersperse (text "") (diffLemmas)
    , text ""
    , kwEnd
    ]
  where
    lemmaIndex' s lemma = lemmaIndexDiff renderUrl tidx s lemma
    diffLemmaIndex' lemma = diffLemmaIndex renderUrl tidx lemma
    lemmas s = map (lemmaIndex' s) (diffTheorySideLemmas s thy)
    diffLemmas = map diffLemmaIndex' (getDiffLemmas thy)
    rules s isdiff = getDiffClassifiedRules s isdiff thy
    rulesInfo s isdiff = parens $ int $ length $ get crProtocol (rules s isdiff)
    -- Per-side, per-kind source summary: case count plus remaining
    -- partial deconstructions (unsolved chains).
    casesInfo s kind isdiff =
        parens $ nCases <> comma <-> text chainInfo
      where
        cases = getDiffSource s isdiff kind thy
        nChains = sum $ map (sum . unsolvedChainConstraints) cases
        nCases = int (length cases) <-> text "cases"
        chainInfo | nChains == 0 = "deconstructions complete"
                  | otherwise    = show nChains ++ " partial deconstructions left"
    bold = withTag "strong" [] . text
    overview n info p = linkToPath renderUrl (TheoryPathDiffMR tidx p) [] (bold n <-> info)
    diffRules = overview ("Diff Rules") (text "") (DiffTheoryDiffRules)
    messageLink s isdiff = overview (show s ++ ": Message theory" ++ if isdiff then " [Diff]" else "") (text "") (DiffTheoryMessage s isdiff)
    ruleLink s isdiff = overview (ruleLinkMsg s isdiff) (rulesInfo s isdiff) (DiffTheoryRules s isdiff)
    ruleLinkMsg s isdiff = show s ++ ": Multiset rewriting rules " ++
                           (if null(diffTheorySideRestrictions s thy) then "" else " and restrictions") ++ (if isdiff then " [Diff]" else "")
    reqCasesLink s name k isdiff = overview name (casesInfo s k isdiff) (DiffTheorySource s k isdiff 0 0)
{-
-- | A snippet that explains a sequent using a rendered graph and the pretty
-- printed sequent.
sequentSnippet :: HtmlDocument d
=> System -- ^ System to pretty print.
-> TheoryPath -- ^ The sequents path (NOT the path to its PNG)
-> d
sequentSnippet se path = refDotPath path $-$ preformatted Nothing (prettySystem se)
-}
-- | A snippet that explains a sub-proof by displaying its proof state, the
-- open-goals, and the new cases.  When the root step carries no annotated
-- constraint system, only the sub-case count is shown.
subProofSnippet :: HtmlDocument d
                => RenderUrl
                -> TheoryIdx        -- ^ The theory index.
                -> TheoryInfo       -- ^ The theory info of this index.
                -> String           -- ^ The lemma.
                -> ProofPath        -- ^ The proof path.
                -> ProofContext     -- ^ The proof context.
                -> IncrementalProof -- ^ The sub-proof.
                -> d
subProofSnippet renderUrl tidx ti lemma proofPath ctxt prf =
    case psInfo $ root prf of
      Nothing -> text $ "no annotated constraint system / " ++ nCases ++ " sub-case(s)"
      Just se -> vcat $
        prettyApplicableProofMethods se
        ++
        [ text ""
        , withTag "h3" [] (text "Constraint system")
        ] ++
        -- graph link only when there is something to draw
        [ refDotPath renderUrl tidx (TheoryProof lemma proofPath)
        | nonEmptyGraph se ]
        ++
        [ preformatted (Just "sequent") (prettyNonGraphSystem se)
        , withTag "h3" [] (text $ nCases ++ " sub-case(s)")
        ] ++
        subCases
  where
    -- Numbered list of proof methods applicable to this constraint system,
    -- followed by the two autoprove variants (unbounded and depth-bounded).
    prettyApplicableProofMethods sys = case proofMethods sys of
      [] -> [ withTag "h3" [] (text "Constraint System is Solved") ]
      pms ->
        [ withTag "h3" [] (text "Applicable Proof Methods:" <->
                           comment_ (goalRankingName ranking))
        , preformatted (Just "methods") (numbered' $ map prettyPM $ zip [1..] pms)
        , autoProverLinks 'a' "" emptyDoc 0
        , autoProverLinks 'b' "bounded-" boundDesc bound
        ]
      where
        boundDesc = text $ " with proof-depth bound " ++ show bound
        bound = fromMaybe 5 $ apBound $ tiAutoProver ti
    -- Keyboard-shortcut style links: lower-case key stops at the first
    -- solution (CutDFS), upper-case searches for all solutions (CutNothing).
    autoProverLinks key classPrefix nameSuffix bound = hsep
      [ text (key : ".")
      , linkToPath renderUrl
          (AutoProverR tidx CutDFS bound (TheoryProof lemma proofPath))
          [classPrefix ++ "autoprove"]
          (keyword_ $ "autoprove")
      , parens $
          text (toUpper key : ".") <->
          linkToPath renderUrl
            (AutoProverR tidx CutNothing bound (TheoryProof lemma proofPath))
            [classPrefix ++ "characterization"]
            (keyword_ "for all solutions")
      , nameSuffix
      ]
    -- One entry of the applicable-proof-method list, linked so clicking
    -- applies method number i at this proof path.
    prettyPM (i, (m, (_cases, expl))) =
      linkToPath renderUrl
        (TheoryPathMR tidx (TheoryMethod lemma proofPath i))
        ["proof-method"] (prettyProofMethod m)
      <-> (if null expl then emptyDoc else lineComment_ expl)
    nCases = show $ M.size $ children prf
    depth = length proofPath
    ranking = useHeuristic (apHeuristic $ tiAutoProver ti) depth
    proofMethods = rankProofMethods ranking ctxt
    subCases = concatMap refSubCase $ M.toList $ children prf
    -- Heading plus (when available) a graph reference for one child case.
    refSubCase (name, prf') =
      [ withTag "h4" [] (text "Case" <-> text name)
      , maybe (text "no proof state available")
          (const $ refDotPath renderUrl tidx $ TheoryProof lemma (proofPath ++ [name]))
          (psInfo $ root prf')
      ]
-- | A snippet that explains a sub-proof by displaying its proof state, the
-- open-goals, and the new cases.  Variant of 'subProofSnippet' for one side
-- of a diff theory: paths and prover routes carry the 'Side'.
subProofDiffSnippet :: HtmlDocument d
                    => RenderUrl
                    -> TheoryIdx        -- ^ The theory index.
                    -> DiffTheoryInfo   -- ^ The diff theory info of this index.
                    -> Side             -- ^ The side of the lemma.
                    -> String           -- ^ The lemma.
                    -> ProofPath        -- ^ The proof path.
                    -> ProofContext     -- ^ The proof context.
                    -> IncrementalProof -- ^ The sub-proof.
                    -> d
subProofDiffSnippet renderUrl tidx ti s lemma proofPath ctxt prf =
    case psInfo $ root prf of
      Nothing -> text $ "no annotated constraint system / " ++ nCases ++ " sub-case(s)"
      Just se -> vcat $
        prettyApplicableProofMethods se
        ++
        [ text ""
        , withTag "h3" [] (text "Constraint system")
        ] ++
        -- graph link only when there is something to draw
        [ refDotDiffPath renderUrl tidx (DiffTheoryProof s lemma proofPath) False
        | nonEmptyGraph se ]
        ++
        [ preformatted (Just "sequent") (prettyNonGraphSystem se)
        , withTag "h3" [] (text $ nCases ++ " sub-case(s)")
        ] ++
        subCases
  where
    -- Numbered list of applicable proof methods plus autoprove links.
    prettyApplicableProofMethods sys = case proofMethods sys of
      [] -> [ withTag "h3" [] (text "Constraint System is Solved") ]
      pms ->
        [ withTag "h3" [] (text "Applicable Proof Methods:" <->
                           comment_ (goalRankingName ranking))
        , preformatted (Just "methods") (numbered' $ map prettyPM $ zip [1..] pms)
        , autoProverLinks 'a' "" emptyDoc 0
        , autoProverLinks 'b' "bounded-" boundDesc bound
        ]
      where
        boundDesc = text $ " with proof-depth bound " ++ show bound
        bound = fromMaybe 5 $ apBound $ dtiAutoProver ti
    -- Lower-case key: stop at first solution (CutDFS); upper-case: all
    -- solutions (CutNothing).
    autoProverLinks key classPrefix nameSuffix bound = hsep
      [ text (key : ".")
      , linkToPath renderUrl
          (AutoProverDiffR tidx CutDFS bound s (DiffTheoryProof s lemma proofPath))
          [classPrefix ++ "autoprove"]
          (keyword_ $ "autoprove")
      , parens $
          text (toUpper key : ".") <->
          linkToPath renderUrl
            (AutoProverDiffR tidx CutNothing bound s (DiffTheoryProof s lemma proofPath))
            [classPrefix ++ "characterization"]
            (keyword_ "for all solutions")
      , nameSuffix
      ]
    -- One applicable-method entry, linked to apply method i at this path.
    prettyPM (i, (m, (_cases, expl))) =
      linkToPath renderUrl
        (TheoryPathDiffMR tidx (DiffTheoryMethod s lemma proofPath i))
        ["proof-method"] (prettyProofMethod m)
      <-> (if null expl then emptyDoc else lineComment_ expl)
    nCases = show $ M.size $ children prf
    depth = length proofPath
    ranking = useHeuristic (apHeuristic $ dtiAutoProver ti) depth
    proofMethods = rankProofMethods ranking ctxt
    subCases = concatMap refSubCase $ M.toList $ children prf
    -- Heading plus (when available) a graph reference for one child case.
    refSubCase (name, prf') =
      [ withTag "h4" [] (text "Case" <-> text name)
      , maybe (text "no proof state available")
          (const $ refDotDiffPath renderUrl tidx (DiffTheoryProof s lemma (proofPath ++ [name])) False)
          (psInfo $ root prf')
      ]
-- | A snippet that explains a sub-proof by displaying its proof state, the
-- open-goals, and the new cases.  Variant for diff-lemmas: works on
-- 'IncrementalDiffProof' and additionally shows the mirror/attack system
-- when the root step is 'DiffMirrored' or 'DiffAttack'.
subDiffProofSnippet :: HtmlDocument d
                    => RenderUrl
                    -> TheoryIdx            -- ^ The theory index.
                    -> DiffTheoryInfo       -- ^ The diff theory info of this index.
                    -> String               -- ^ The diff lemma.
                    -> ProofPath            -- ^ The proof path.
                    -> DiffProofContext     -- ^ The proof context.
                    -> IncrementalDiffProof -- ^ The sub-proof.
                    -> d
subDiffProofSnippet renderUrl tidx ti lemma proofPath ctxt prf =
    case dpsInfo $ root prf of
      Nothing -> text $ "no annotated constraint system / " ++ nCases ++ " sub-case(s)"
      Just se -> vcat $
        prettyApplicableDiffProofMethods se
        ++
        [ text ""
        , withTag "h3" [] (text "Constraint system")
        ] ++
        -- graph link only when there is something to draw
        [ refDotDiffPath renderUrl tidx (DiffTheoryDiffProof lemma proofPath) False
        | nonEmptyGraphDiff se ]
        ++
        mirrorSystem
        ++
        [ preformatted (Just "sequent") (prettyNonGraphSystemDiff ctxt se)
        , withTag "h3" [] (text $ nCases ++ " sub-case(s)")
        ] ++
        subCases
  where
    -- Numbered list of applicable diff-proof methods plus autoprove links.
    prettyApplicableDiffProofMethods sys = case diffProofMethods sys of
      [] -> [ withTag "h3" [] (text "Constraint System is Solved") ]
      pms ->
        [ withTag "h3" [] (text "Applicable Proof Methods:" <->
                           comment_ (goalRankingName ranking))
        , preformatted (Just "methods") (numbered' $ map prettyPM $ zip [1..] pms)
        , autoProverLinks 'a' "" emptyDoc 0
        , autoProverLinks 'b' "bounded-" boundDesc bound
        ]
      where
        boundDesc = text $ " with proof-depth bound " ++ show bound
        bound = fromMaybe 5 $ apBound $ dtiAutoProver ti
    -- Extra section shown when the root step ended in a mirrored system
    -- (proof) or an attack (counterexample); empty otherwise.
    mirrorSystem =
      if dpsMethod (root prf) == DiffMirrored
        then [ text "", withTag "h3" [] (text "mirror:") ] ++
             [ refDotDiffPath renderUrl tidx (DiffTheoryDiffProof lemma proofPath) True ] ++
             [ text "" ]
        else if dpsMethod (root prf) == DiffAttack
          then [ text "", withTag "h3" [] (text "attack:") ] ++
               [ refDotDiffPath renderUrl tidx (DiffTheoryDiffProof lemma proofPath) True ] ++
               [ text "(If no attack graph is shown, the current graph has no mirrors. If one of the mirror graphs violates a restriction, this graph is shown.)" ] ++
               [ text "" ]
          else []
    -- Lower-case key: stop at first solution; upper-case: all solutions.
    autoProverLinks key classPrefix nameSuffix bound = hsep
      [ text (key : ".")
      , linkToPath renderUrl
          (AutoDiffProverR tidx CutDFS bound (DiffTheoryDiffProof lemma proofPath))
          [classPrefix ++ "autoprove"]
          (keyword_ $ "autoprove")
      , parens $
          text (toUpper key : ".") <->
          linkToPath renderUrl
            (AutoDiffProverR tidx CutNothing bound (DiffTheoryDiffProof lemma proofPath))
            [classPrefix ++ "characterization"]
            (keyword_ "for all solutions")
      , nameSuffix
      ]
    -- One applicable-method entry, linked to apply method i at this path.
    prettyPM (i, (m, (_cases, expl))) =
      linkToPath renderUrl
        (TheoryPathDiffMR tidx (DiffTheoryDiffMethod lemma proofPath i))
        ["proof-method"] (prettyDiffProofMethod m)
      <-> (if null expl then emptyDoc else lineComment_ expl)
    nCases = show $ M.size $ children prf
    depth = length proofPath
    ranking = useHeuristic (apHeuristic $ dtiAutoProver ti) depth
    diffProofMethods = rankDiffProofMethods ranking ctxt
    subCases = concatMap refSubCase $ M.toList $ children prf
    -- Heading plus (when available) a graph reference for one child case.
    refSubCase (name, prf') =
      [ withTag "h4" [] (text "Case" <-> text name)
      , maybe (text "no proof state available")
          (const $ refDotDiffPath renderUrl tidx (DiffTheoryDiffProof lemma (proofPath ++ [name])) False)
          (dpsInfo $ root prf')
      ]
-- | An Html document presenting one source (case-splitting theorem): a
-- header naming the resolved goal and case count, followed by each case with
-- a graph reference and its pretty-printed constraint system.
htmlSource :: HtmlDocument d
           => RenderUrl -> TheoryIdx -> SourceKind -> (Int, Source) -> d
htmlSource renderUrl tidx kind (j, th)
    | null caseDocs = withTag "h2" [] header $-$ withTag "h3" [] (text "No cases.")
    | otherwise     = vcat (withTag "h2" [] header : caseDocs)
  where
    -- All disjuncts of this source, rendered one after another.
    caseDocs  = concat [ renderCase i c | (i, c) <- zip [1..] disjuncts ]
    disjuncts = getDisj $ get cdCases th
    numCases  = int (length disjuncts)
    monoP     = withTag "p" [("class","monospace cases")]
    -- The goal this source resolves, quoted and indented.
    premDoc   = nest 2 $ doubleQuotes $ prettyGoal $ get cdGoal th
    header    = hsep
        [ text "Sources of" <-> premDoc
        , parens $ numCases <-> text "cases"
        ]
    -- One numbered case: heading, graph reference, goal, and the system.
    renderCase i (names, se) =
        [ withTag "h3" [] $ fsep [ text "Source", int i, text "of", numCases
                                 , text " / named ", doubleQuotes (text caseName),
                                   if partial then text "(partial deconstructions)" else text "" ]
        , refDotPath renderUrl tidx (TheorySource kind j i)
        , withTag "p" [] premDoc
        , monoP $ prettyNonGraphSystem se
        ]
      where
        caseName = intercalate "_" names
        partial  = not $ null $ unsolvedChains se
-- | An Html document presenting one source of a diff theory side: a header
-- naming the resolved goal and case count, followed by each case with a
-- graph reference and its pretty-printed constraint system.
htmlSourceDiff :: HtmlDocument d
               => RenderUrl -> TheoryIdx -> Side -> SourceKind -> Bool -> (Int, Source) -> d
htmlSourceDiff renderUrl tidx s kind d (j, th)
    | null caseDocs = withTag "h2" [] header $-$ withTag "h3" [] (text "No cases.")
    | otherwise     = vcat (withTag "h2" [] header : caseDocs)
  where
    -- All disjuncts of this source, rendered one after another.
    caseDocs  = concat [ renderCase i c | (i, c) <- zip [1..] disjuncts ]
    disjuncts = getDisj $ get cdCases th
    numCases  = int (length disjuncts)
    monoP     = withTag "p" [("class","monospace cases")]
    -- The goal this source resolves, quoted and indented.
    premDoc   = nest 2 $ doubleQuotes $ prettyGoal $ get cdGoal th
    header    = hsep
        [ text "Sources of" <-> premDoc
        , parens $ numCases <-> text "cases"
        ]
    -- One numbered case: heading, graph reference, goal, and the system.
    renderCase i (names, se) =
        [ withTag "h3" [] $ fsep [ text "Source", int i, text "of", numCases
                                 , text " / named ", doubleQuotes (text caseName) ]
        , refDotDiffPath renderUrl tidx (DiffTheorySource s kind d j i) False
        , withTag "p" [] premDoc
        , monoP $ prettyNonGraphSystem se
        ]
      where
        caseName = intercalate "_" names
-- | Html document listing every source case of the given kind, in order.
reqCasesSnippet :: HtmlDocument d => RenderUrl -> TheoryIdx -> SourceKind -> ClosedTheory -> d
reqCasesSnippet renderUrl tidx kind thy =
    vcat (map (htmlSource renderUrl tidx kind) (zip [1..] (getSource kind thy)))
-- | Html document listing every source case of the given kind for one side
-- of a diff theory, in order.
reqCasesDiffSnippet :: HtmlDocument d => RenderUrl -> TheoryIdx -> Side -> SourceKind -> Bool -> ClosedDiffTheory -> d
reqCasesDiffSnippet renderUrl tidx s kind isdiff thy =
    vcat (map (htmlSourceDiff renderUrl tidx s kind isdiff) (zip [1..] (getDiffSource s isdiff kind thy)))
-- | Html document showing the injective fact symbols, the multiset
-- rewriting rules, and the trace restrictions of a closed theory.
rulesSnippet :: HtmlDocument d => ClosedTheory -> d
rulesSnippet thy = vcat
    [ section "Fact Symbols with Injective Instances"
        (fsepList (text . showFactTagArity) injectiveFacts)
    , section "Multiset Rewriting Rules"
        (vsep (map prettyRuleAC protoRules))
    , section "Restrictions of the Set of Traces"
        (vsep (map prettyRestriction (theoryRestrictions thy)))
    ]
  where
    protoRules     = get crProtocol $ getClassifiedRules thy
    injectiveFacts = S.toList $ getInjectiveFactInsts thy
    -- A headed monospace paragraph; collapses to nothing if the body is empty.
    section title body = caseEmptyDoc emptyDoc rendered body
      where
        rendered = withTag "h2" [] (text title) $$
                   withTag "p" [("class","monospace rules")] body
-- | Html document showing the message theory: the signature plus the
-- construction and deconstruction rules of the intruder.
messageSnippet :: HtmlDocument d => ClosedTheory -> d
messageSnippet thy = vcat
    [ section "Signature" [prettySignatureWithMaude (get thySignature thy)]
    , section "Construction Rules" (rulesOf crConstruct)
    , section "Deconstruction Rules" (rulesOf crDestruct)
    ]
  where
    rulesOf sel = map prettyRuleAC $ get sel $ getClassifiedRules thy
    -- A headed monospace paragraph with blank lines between the entries.
    section title docs =
        withTag "h2" [] (text title) $$
        withTag "p" [("class","monospace rules")]
               (vcat (intersperse (text "") docs))
-- | Html document showing the diff rules of a closed diff theory.
rulesDiffSnippet :: HtmlDocument d => ClosedDiffTheory -> d
rulesDiffSnippet thy = vcat
    [ section "Multiset Rewriting Rules"
        (vsep (map prettyProtoRuleE diffRules))
    ]
  where
    diffRules = diffTheoryDiffRules thy
    -- A headed monospace paragraph; collapses to nothing if the body is empty.
    section title body = caseEmptyDoc emptyDoc rendered body
      where
        rendered = withTag "h2" [] (text title) $$
                   withTag "p" [("class","monospace rules")] body
-- | Html document showing one side's injective fact symbols, multiset
-- rewriting rules, and trace restrictions of a closed diff theory.
rulesDiffSnippetSide :: HtmlDocument d => Side -> Bool -> ClosedDiffTheory -> d
rulesDiffSnippetSide s isdiff thy = vcat
    [ section "Fact Symbols with Injective Instances"
        (fsepList (text . showFactTagArity) injectiveFacts)
    , section "Multiset Rewriting Rules"
        (vsep (map prettyRuleAC protoRules))
    , section "Restrictions of the Set of Traces"
        (vsep (map prettyRestriction (diffTheorySideRestrictions s thy)))
    ]
  where
    protoRules     = get crProtocol $ getDiffClassifiedRules s isdiff thy
    injectiveFacts = S.toList $ getDiffInjectiveFactInsts s isdiff thy
    -- A headed monospace paragraph; collapses to nothing if the body is empty.
    section title body = caseEmptyDoc emptyDoc rendered body
      where
        rendered = withTag "h2" [] (text title) $$
                   withTag "p" [("class","monospace rules")] body
-- | Html document showing one side's message theory: the signature plus the
-- construction and deconstruction rules of the intruder.
messageDiffSnippet :: HtmlDocument d => Side -> Bool -> ClosedDiffTheory -> d
messageDiffSnippet s isdiff thy = vcat
    [ section "Signature" [prettySignatureWithMaude (get diffThySignature thy)]
    , section "Construction Rules" (rulesOf crConstruct)
    , section "Deconstruction Rules" (rulesOf crDestruct)
    ]
  where
    rulesOf sel = map prettyRuleAC $ get sel $ getDiffClassifiedRules s isdiff thy
    -- A headed monospace paragraph with blank lines between the entries.
    section title docs =
        withTag "h2" [] (text title) $$
        withTag "p" [("class","monospace rules")]
               (vcat (intersperse (text "") docs))
-- | Render the item in the given theory given by the supplied path.
-- Dispatches on the 'TheoryPath' constructor; the help page is a hamlet
-- template whose body must not be reformatted (hamlet is
-- whitespace-sensitive).
htmlThyPath :: RenderUrl -- ^ The function for rendering Urls.
            -> TheoryInfo -- ^ The info of the theory to render
            -> TheoryPath -- ^ Path to render
            -> Html
htmlThyPath renderUrl info path =
    go path
  where
    thy = tiTheory info
    tidx = tiIndex info
    -- Rendering a HtmlDoc to Html
    pp :: HtmlDoc Doc -> Html
    pp d = case renderHtmlDoc d of
      [] -> toHtml "Trying to render document yielded empty string. This is a bug."
      cs -> preEscapedToMarkup cs
    go (TheoryMethod _ _ _) = pp $ text "Cannot display theory method."
    go TheoryRules = pp $ rulesSnippet thy
    go TheoryMessage = pp $ messageSnippet thy
    go (TheorySource kind _ _) = pp $ reqCasesSnippet renderUrl tidx kind thy
    -- Resolve lemma and proof path; fall back to an error message if either
    -- lookup fails.
    go (TheoryProof l p) = pp $
      fromMaybe (text "No such lemma or proof path.") $ do
        lemma <- lookupLemma l thy
        subProofSnippet renderUrl tidx info l p (getProofContext lemma thy)
          <$> resolveProofPath thy l p
    go (TheoryLemma _) = pp $ text "Implement lemma pretty printing!"
    go TheoryHelp = [hamlet|
        $newline never
        <p>
          Theory: #{get thyName $ tiTheory info}
          \ (Loaded at #{formatTime defaultTimeLocale "%T" $ tiTime info}
          \ from #{show $ tiOrigin info})
          \ #{preEscapedToMarkup wfErrors}
        <div id="help">
          <h3>Quick introduction
          <noscript>
            <div class="warning">
              Warning: JavaScript must be enabled for the
              <span class="tamarin">Tamarin</span>
              prover GUI to function properly.
          <p>
            <em>Left pane: Proof scripts display.
            <ul>
              <li>
                When a theory is initially loaded, there will be a line at the
                \ end of each theorem stating #
                <tt>"by sorry // not yet proven"
                . Click on #
                <tt>sorry
                \ to inspect the proof state.
              <li>
                Right-click to show further options, such as autoprove.
          <p>
            <em>Right pane: Visualization.
            <ul>
              <li>
                Visualization and information display relating to the
                \ currently selected item.
          <h3>Keyboard shortcuts
          <p>
            <table>
              <tr>
                <td>
                  <span class="keys">j/k
                <td>
                  Jump to the next/previous proof path within the currently
                  \ focused lemma.
              <tr>
                <td>
                  <span class="keys">J/K
                <td>
                  Jump to the next/previous open goal within the currently
                  \ focused lemma, or to the next/previous lemma if there are no
                  \ more #
                  <tt>sorry
                  \ steps in the proof of the current lemma.
              <tr>
                <td>
                  <span class="keys">1-9
                <td>
                  Apply the proof method with the given number as shown in the
                  \ applicable proof method section in the main view.
              <tr>
                <td>
                  <span class="keys">a/A
                <td>
                  Apply the autoprove method to the focused proof step.
                  \ <span class="keys">a</span>
                  \ stops after finding a solution, and
                  \ <span class="keys">A</span>
                  \ searches for all solutions.
                  \ Needs to have a #
                  <tt>sorry
                  \ selected to work.
              <tr>
                <td>
                  <span class="keys">b/B
                <td>
                  Apply a bounded-depth version of the autoprove method to the
                  \ focused proof step.
                  \ <span class="keys">b</span>
                  \ stops after finding a solution, and
                  \ <span class="keys">B</span>
                  \ searches for all solutions.
                  \ Needs to have a #
                  <tt>sorry
                  \ selected to work.
              <tr>
                <td>
                  <span class="keys">?
                <td>
                  Display this help message.
      |] renderUrl
      where
        -- Raw HTML warning box listing failed wellformedness checks, empty
        -- when all checks pass; spliced unescaped into the help page.
        wfErrors = case report of
          [] -> ""
          _ -> "<div class=\"wf-warning\">\nWARNING: the following wellformedness checks failed!<br /><br />\n" ++ (renderHtmlDoc . htmlDoc $ prettyWfErrorReport report) ++ "\n</div>"
        report = checkWellformedness $ openTheory thy
-- | Render the item in the given theory given by the supplied path.
-- Diff-theory analogue of 'htmlThyPath'; the help page is a hamlet template
-- whose body must not be reformatted (hamlet is whitespace-sensitive).
htmlDiffThyPath :: RenderUrl -- ^ The function for rendering Urls.
                -> DiffTheoryInfo -- ^ The info of the theory to render
                -> DiffTheoryPath -- ^ Path to render
                -> Html
htmlDiffThyPath renderUrl info path =
    go path
  where
    thy = dtiTheory info
    tidx = dtiIndex info
    -- Rendering a HtmlDoc to Html
    pp :: HtmlDoc Doc -> Html
    pp d = case renderHtmlDoc d of
      [] -> toHtml "Trying to render document yielded empty string. This is a bug."
      cs -> preEscapedToMarkup cs
    go (DiffTheoryMethod _ _ _ _) = pp $ text "Cannot display theory method."
    go (DiffTheoryDiffMethod _ _ _) = pp $ text "Cannot display theory diff method."
    go (DiffTheoryDiffLemma _) = pp $ text "Implement diff lemma pretty printing!"
    go (DiffTheoryDiffRules) = pp $ rulesDiffSnippet thy
    go (DiffTheoryRules s d) = pp $ rulesDiffSnippetSide s d thy
    go (DiffTheoryMessage s d) = pp $ messageDiffSnippet s d thy
    go (DiffTheorySource s kind d _ _) = pp $ reqCasesDiffSnippet renderUrl tidx s kind d thy
    -- Resolve side lemma and proof path; error message on lookup failure.
    go (DiffTheoryProof s l p) = pp $
      fromMaybe (text "No such lemma or proof path.") $ do
        lemma <- lookupLemmaDiff s l thy
        subProofDiffSnippet renderUrl tidx info s l p (getProofContextDiff s lemma thy)
          <$> resolveProofPathDiff thy s l p
    -- Resolve diff-lemma and proof path; error message on lookup failure.
    go (DiffTheoryDiffProof l p) = pp $
      fromMaybe (text "No such lemma or proof path.") $ do
        lemma <- lookupDiffLemma l thy
        subDiffProofSnippet renderUrl tidx info l p (getDiffProofContext lemma thy)
          <$> resolveProofPathDiffLemma thy l p
    go (DiffTheoryLemma _ _) = pp $ text "Implement lemma pretty printing!"
    go DiffTheoryHelp = [hamlet|
        $newline never
        <p>
          Theory: #{get diffThyName $ dtiTheory info}
          \ (Loaded at #{formatTime defaultTimeLocale "%T" $ dtiTime info}
          \ from #{show $ dtiOrigin info})
          \ #{preEscapedToMarkup wfErrors}
        <div id="help">
          <h3>Quick introduction
          <noscript>
            <div class="warning">
              Warning: JavaScript must be enabled for the
              <span class="tamarin">Tamarin</span>
              prover GUI to function properly.
          <p>
            <em>Left pane: Proof scripts display.
            <ul>
              <li>
                When a theory is initially loaded, there will be a line at the
                \ end of each theorem stating #
                <tt>"by sorry // not yet proven"
                . Click on #
                <tt>sorry
                \ to inspect the proof state.
              <li>
                Right-click to show further options, such as autoprove.
          <p>
            <em>Right pane: Visualization.
            <ul>
              <li>
                Visualization and information display relating to the
                \ currently selected item.
          <h3>Keyboard shortcuts
          <p>
            <table>
              <tr>
                <td>
                  <span class="keys">j/k
                <td>
                  Jump to the next/previous proof path within the currently
                  \ focused lemma.
              <tr>
                <td>
                  <span class="keys">J/K
                <td>
                  Jump to the next/previous open goal within the currently
                  \ focused lemma, or to the next/previous lemma if there are no
                  \ more #
                  <tt>sorry
                  \ steps in the proof of the current lemma.
              <tr>
                <td>
                  <span class="keys">1-9
                <td>
                  Apply the proof method with the given number as shown in the
                  \ applicable proof method section in the main view.
              <tr>
                <td>
                  <span class="keys">a/A
                <td>
                  Apply the autoprove method to the focused proof step.
                  \ <span class="keys">a</span>
                  \ stops after finding a solution, and
                  \ <span class="keys">A</span>
                  \ searches for all solutions.
                  \ Needs to have a #
                  <tt>sorry
                  \ selected to work.
              <tr>
                <td>
                  <span class="keys">b/B
                <td>
                  Apply a bounded-depth version of the autoprove method to the
                  \ focused proof step.
                  \ <span class="keys">b</span>
                  \ stops after finding a solution, and
                  \ <span class="keys">B</span>
                  \ searches for all solutions.
                  \ Needs to have a #
                  <tt>sorry
                  \ selected to work.
              <tr>
                <td>
                  <span class="keys">?
                <td>
                  Display this help message.
      |] renderUrl
      where
        -- Raw HTML warning box listing failed wellformedness checks, empty
        -- when all checks pass; spliced unescaped into the help page.
        wfErrors = case report of
          [] -> ""
          _ -> "<div class=\"wf-warning\">\nWARNING: the following wellformedness checks failed!<br /><br />\n" ++ (renderHtmlDoc . htmlDoc $ prettyWfErrorReport report) ++ "\n</div>"
        report = checkWellformednessDiff $ openDiffTheory thy
{-
-- | Render debug information for the item in the theory given by the path.
htmlThyDbgPath :: HtmlDocument d
=> ClosedTheory -- ^ The theory to render
-> TheoryPath -- ^ Path to render
-> Maybe d
htmlThyDbgPath thy path = go path
where
go (TheoryProof l p) = do
proof <- resolveProofPath thy l p
prettySystem <$> psInfo (root proof)
go _ = Nothing
-}
-- | Render the image corresponding to the given theory path and return the
-- path of the image file (which may already exist from an earlier or
-- concurrent render of the same content; results are content-addressed in
-- the cache directory).
imgThyPath :: ImageFormat
           -> (String, FilePath) -- ^ choice and command for rendering (dot or json)
           -> FilePath -- ^ Tamarin's cache directory
           -> (System -> D.Dot ())
           -> (String -> System -> String)
           -- ^ to export constraint system to JSON
           -> String -- ^ Simplification level of graph (string representation of integer >= 0)
           -> Bool -- ^ True iff we want abbreviations
           -> ClosedTheory
           -> TheoryPath
           -> IO FilePath
imgThyPath imgFormat (graphChoice, graphCommand) cacheDir_ compact showJsonGraphFunct simplificationLevel abbreviate thy path = go path
  where
    -- Only sources and proof states can be rendered as images.
    go (TheorySource k i j) = case graphChoice of
      "json" -> renderGraphCode "json" (casesJsonCode k i j)
      _ -> renderGraphCode "dot" (casesDotCode k i j)
    go (TheoryProof l p) = case graphChoice of
      "json" -> renderGraphCode "json" (proofPathJsonCode l p)
      _ -> renderGraphCode "dot" (proofPathDotCode l p)
    go _ = error "Unhandled theory path. This is a bug."
    -- Prefix dot code with comment mentioning all protocol rule names
    prefixedShowDot dot = unlines
      [ "// simplification: " ++ simplificationLevel
      , "// protocol rules: " ++ ruleList (getProtoRuleEs thy)
      , "// message deduction rules: " ++ ruleList (getIntrVariants thy)
      , "// abbreviate: " ++ show abbreviate
      , D.showDot dot
      ]
      where
        ruleList :: HasRuleName (Rule i) => [Rule i] -> String
        ruleList = concat . intersperse ", " . nub . map showRuleCaseName
    -- Get dot code for required cases (i-th source, j-th case, 1-based)
    casesDotCode k i j = prefixedShowDot $
        compact $ snd $ cases !! (i-1) !! (j-1)
      where
        cases = map (getDisj . get cdCases) (getSource k thy)
    -- Get JSON code for required cases
    casesJsonCode k i j =
        showJsonGraphFunct ("Theory: " ++ (get thyName thy) ++ " Case: " ++ show i ++ ":" ++ show j)
        $ snd $ cases !! (i-1) !! (j-1)
      where
        cases = map (getDisj . get cdCases) (getSource k thy)
    -- Get dot code for proof path in lemma; empty graph if unresolvable
    proofPathDotCode lemma proofPath =
      prefixedShowDot $ fromMaybe (return ()) $ do
        subProof <- resolveProofPath thy lemma proofPath
        sequent <- psInfo $ root subProof
        return $ compact sequent
    -- Get JSON for proof path in lemma; empty string if unresolvable
    proofPathJsonCode lemma proofPath =
      fromMaybe ("") $ do
        subProof <- resolveProofPath thy lemma proofPath
        sequent <- psInfo $ root subProof
        return $ showJsonGraphFunct ("Theory: " ++ (get thyName thy) ++ " Lemma: " ++ lemma) sequent
    -- Render a piece of dot or JSON code to an image file and return its path
    renderGraphCode choice code = do
      let graphPath = cacheDir_ </> getGraphPath choice code
          imgPath = addExtension graphPath (show imgFormat)
          -- A busy wait loop with a maximal number of iterations.
          -- Waits while another thread appears to be rendering (graph file
          -- written but image not yet present) and iterations remain;
          -- returns True iff the image file exists afterwards.
          renderedOrRendering :: Int -> IO Bool
          renderedOrRendering n = do
            graphExists <- doesFileExist graphPath
            imgExists <- doesFileExist imgPath
            -- BUGFIX: the condition must stop looping once n reaches 0;
            -- the previous 'n <= 0 || ...' recursed forever at the timeout.
            if (n > 0 && graphExists && not imgExists)
              then do threadDelay 10000 -- wait 10 ms
                      renderedOrRendering (n - 1)
              else return imgExists
      -- Ensure that the output directory exists.
      createDirectoryIfMissing True (takeDirectory graphPath)
      imgGenerated <- firstSuccess
        [ -- There might be some other thread that rendered or is rendering
          -- this dot file. We wait at most 50 iterations (0.5 sec timeout)
          -- for this other thread to render the image. Afterwards, we give
          -- it a try by ourselves.
          renderedOrRendering 50
          -- create dot-file and render to image
        , do writeFile graphPath code
             -- select the correct command to generate img
             if (choice == "json")
               then jsonToImg graphPath imgPath
               else dotToImg "dot" graphPath imgPath
          -- sometimes 'dot' fails => use 'fdp' as a backup tool
        , if (choice == "dot")
            then dotToImg "fdp" graphPath imgPath
            else return False
        ]
      if imgGenerated
        then return imgPath
        else trace ("WARNING: failed to convert:\n '" ++ graphPath ++ "'")
                   (return imgPath)
    -- render img file from json file
    jsonToImg jsonFile imgFile = do
      (ecode,_out,err) <- readProcessWithExitCode graphCommand [imgFile, jsonFile] ""
      case ecode of
        ExitSuccess -> return True
        ExitFailure i -> do
          putStrLn $ "jsonToImg: "++graphCommand++" failed with code "
                     ++show i++" for file "++jsonFile++":\n"++err
          return False
    -- render img file from dot file
    dotToImg dotMode dotFile imgFile = do
      (ecode,_out,err) <- readProcessWithExitCode graphCommand
        [ "-T"++show imgFormat, "-K"++dotMode, "-o",imgFile, dotFile]
        ""
      case ecode of
        ExitSuccess -> return True
        ExitFailure i -> do
          putStrLn $ "dotToImg: "++graphCommand++" failed with code "
                     ++show i++" for file "++dotFile++":\n"++err
          return False
    -- Run the actions in order until the first one returns True.
    firstSuccess [] = return False
    firstSuccess (m:ms) = do
      s <- m
      if s then return True else firstSuccess ms
-- | Render the image corresponding to the given diff theory path and return
-- the path of the image file (which may already exist from an earlier or
-- concurrent render of the same dot code; results are content-addressed in
-- the cache directory).
imgDiffThyPath :: ImageFormat
               -> FilePath -- ^ 'dot' command
               -> FilePath -- ^ Tamarin's cache directory
               -> (System -> D.Dot ())
               -> String -- ^ Simplification level of graph (string representation of integer >= 0)
               -> Bool -- ^ True iff we want abbreviations
               -> ClosedDiffTheory
               -> DiffTheoryPath
               -> Bool -- ^ True if we want the mirror graph
               -> IO FilePath
imgDiffThyPath imgFormat dotCommand cacheDir_ compact simplificationLevel abbreviate thy path mirror = go path
  where
    -- Only sources and (diff-)proof states can be rendered as images.
    go (DiffTheorySource s k d i j) = renderDotCode (casesDotCode s k i j d)
    go (DiffTheoryProof s l p) = renderDotCode (proofPathDotCode s l p)
    go (DiffTheoryDiffProof l p) = renderDotCode (proofPathDotCodeDiff l p mirror)
    go _ = error "Unhandled theory path. This is a bug."
    -- Prefix dot code with comment mentioning all protocol rule names
    prefixedShowDot dot = unlines
      [ "// simplification: " ++ simplificationLevel
      , "// protocol rules: " ++ ruleList (getProtoRuleEsDiff LHS thy) -- FIXME RS: the rule names are the same on LHS and RHS, so we just pick LHS; should pass the current Side through to make this clean
      , "// message deduction rules: " ++ ruleList (getIntrVariantsDiff LHS thy) -- FIXME RS: the intruder rule names are the same on LHS and RHS; should pass the current Side through to make this clean
--      , "// message deduction rules: " ++ ruleList ((intruderRules . get (_crcRules . diffThyCacheLeft)) thy) -- FIXME RS: again, we arbitrarily pick the LHS version of the cache, should be the same on both sides
--intruderRules . L.get (crcRules . diffThyCacheLeft)
      , "// abbreviate: " ++ show abbreviate
      , D.showDot dot
      ]
      where
        ruleList :: HasRuleName (Rule i) => [Rule i] -> String
        ruleList = concat . intersperse ", " . nub . map showRuleCaseName
    -- Get dot code for required cases (i-th source, j-th case, 1-based)
    casesDotCode s k i j isdiff = prefixedShowDot $
        compact $ snd $ cases !! (i-1) !! (j-1)
      where
        cases = map (getDisj . get cdCases) (getDiffSource s isdiff k thy)
    -- Get dot code for proof path in lemma; empty graph if unresolvable
    proofPathDotCode s lemma proofPath =
      D.showDot $ fromMaybe (return ()) $ do
        subProof <- resolveProofPathDiff thy s lemma proofPath
        sequent <- psInfo $ root subProof
        return $ compact sequent
    -- Get dot code for a diff-proof path; when 'mir' is set, render the
    -- first mirror graph instead of the system itself
    proofPathDotCodeDiff lemma proofPath mir =
      D.showDot $ fromMaybe (return ()) $ do
        subProof <- resolveProofPathDiffLemma thy lemma proofPath
        diffSequent <- dpsInfo $ root subProof
        if mir
          then do
            lem <- lookupDiffLemma lemma thy
            let ctxt = getDiffProofContext lem thy
            side <- get dsSide diffSequent
            let isSolved s sys' = (rankProofMethods GoalNrRanking (eitherProofContext ctxt s) sys') == [] -- checks if the system is solved
            nsequent <- get dsSystem diffSequent
            -- Here we can potentially get Nothing if there is no mirror DG
            sequentList <- snd <$> getMirrorDGandEvaluateRestrictions ctxt diffSequent (isSolved side nsequent)
            if null sequentList then Nothing else return $ compact $ head sequentList
          else do
            sequent <- get dsSystem diffSequent
            return $ compact sequent
    -- Render a piece of dot code to an image file and return its path
    renderDotCode code = do
      let dotPath = cacheDir_ </> getDotPath code
          imgPath = addExtension dotPath (show imgFormat)
          -- A busy wait loop with a maximal number of iterations.
          -- Waits while another thread appears to be rendering (dot file
          -- written but image not yet present) and iterations remain;
          -- returns True iff the image file exists afterwards.
          renderedOrRendering :: Int -> IO Bool
          renderedOrRendering n = do
            dotExists <- doesFileExist dotPath
            imgExists <- doesFileExist imgPath
            -- BUGFIX: the condition must stop looping once n reaches 0;
            -- the previous 'n <= 0 || ...' recursed forever at the timeout.
            if (n > 0 && dotExists && not imgExists)
              then do threadDelay 10000 -- wait 10 ms
                      renderedOrRendering (n - 1)
              else return imgExists
      -- Ensure that the output directory exists.
      createDirectoryIfMissing True (takeDirectory dotPath)
      imgGenerated <- firstSuccess
        [ -- There might be some other thread that rendered or is rendering
          -- this dot file. We wait at most 50 iterations (0.5 sec timeout)
          -- for this other thread to render the image. Afterwards, we give
          -- it a try by ourselves.
          renderedOrRendering 50
          -- create dot-file and render to image
        , do writeFile dotPath code
             dotToImg "dot" dotPath imgPath
          -- sometimes 'dot' fails => use 'fdp' as a backup tool
        , dotToImg "fdp" dotPath imgPath
        ]
      if imgGenerated
        then return imgPath
        else trace ("WARNING: failed to convert:\n '" ++ dotPath ++ "'")
                   (return imgPath)
    -- render img file from dot file
    dotToImg dotMode dotFile imgFile = do
      (ecode,_out,err) <- readProcessWithExitCode dotCommand
        [ "-T"++show imgFormat, "-K"++dotMode, "-o",imgFile, dotFile]
        ""
      case ecode of
        ExitSuccess -> return True
        ExitFailure i -> do
          putStrLn $ "dotToImg: "++dotCommand++" failed with code "
                     ++show i++" for file "++dotFile++":\n"++err
          return False
    -- Run the actions in order until the first one returns True.
    firstSuccess [] = return False
    firstSuccess (m:ms) = do
      s <- m
      if s then return True else firstSuccess ms
-- | Human-readable page title for a given theory path.
titleThyPath :: ClosedTheory -> TheoryPath -> String
titleThyPath thy = title
  where
    title TheoryHelp = "Theory: " ++ get thyName thy
    title TheoryRules = "Multiset rewriting rules and restrictions"
    title TheoryMessage = "Message theory"
    title (TheorySource RawSource _ _) = "Raw sources"
    title (TheorySource RefinedSource _ _) = "Refined sources"
    title (TheoryLemma l) = "Lemma: " ++ l
    title (TheoryProof l []) = "Lemma: " ++ l
    title (TheoryProof l p)
      | null (last p) = "Method: " ++ methodName l p
      | otherwise = "Case: " ++ last p
    title (TheoryMethod _ _ _) = "Method Path: This title should not be shown. Please file a bug"
    -- Pretty-print the proof method the path points to; "None" if the
    -- path does not resolve.
    methodName l p =
      maybe "None" (renderHtmlDoc . prettyProofMethod . psMethod . root)
            (resolveProofPath thy l p)
-- | Get title to display for a given proof path.
-- 's' is the Side (LHS/RHS) and 'd' marks the diff variant of a page.
titleDiffThyPath :: ClosedDiffTheory -> DiffTheoryPath -> String
titleDiffThyPath thy path = go path
  where
    go DiffTheoryHelp = "Theory: " ++ get diffThyName thy
    go (DiffTheoryRules s d) = "Multiset rewriting rules and restrictions [" ++ show s ++ "]" ++ if d then " [Diff]" else ""
    go DiffTheoryDiffRules = "Multiset rewriting rules and restrictions - unprocessed"
    go (DiffTheoryMessage s d) = "Message theory [" ++ show s ++ "]" ++ if d then " [Diff]" else ""
    go (DiffTheorySource s RawSource d _ _) = "Raw sources [" ++ show s ++ "]" ++ if d then " [Diff]" else ""
    go (DiffTheorySource s RefinedSource d _ _) = "Refined sources [" ++ show s ++ "]" ++ if d then " [Diff]" else ""
    go (DiffTheoryLemma s l) = "Lemma: " ++ l ++ "[" ++ show s ++ "]"
    go (DiffTheoryDiffLemma l) = "DiffLemma: " ++ l
    go (DiffTheoryProof s l []) = "Lemma: " ++ l ++ "[" ++ show s ++ "]"
    go (DiffTheoryProof s l p)
      | null (last p) = "Method: " ++ methodName s l p
      | otherwise = "Case: " ++ last p
    go (DiffTheoryDiffProof l []) = "Diff-Lemma: " ++ l
    go (DiffTheoryDiffProof l p)
      | null (last p) = "Method: " ++ diffMethodName l p
      | otherwise = "Case: " ++ last p
    go (DiffTheoryMethod _ _ _ _) = "Method Path: This title should not be shown. Please file a bug"
    go (DiffTheoryDiffMethod _ _ _) = "DiffMethod Path: This title should not be shown. Please file a bug"
    -- Pretty-printed proof method at the end of the path; "None" when
    -- the path does not resolve to a proof step.
    methodName s l p =
      case resolveProofPathDiff thy s l p of
        Nothing -> "None"
        Just proof -> renderHtmlDoc $ prettyProofMethod $ psMethod $ root proof
    diffMethodName l p =
      case resolveProofPathDiffLemma thy l p of
        Nothing -> "None"
        Just proof -> renderHtmlDoc $ prettyDiffProofMethod $ dpsMethod $ root proof
-- | Resolve a proof path to the sub-proof it denotes.
-- Fails ('Nothing') when the lemma is unknown or the path is invalid.
resolveProofPath :: ClosedTheory -- ^ Theory to resolve in
                 -> String       -- ^ Name of lemma
                 -> ProofPath    -- ^ Path to resolve
                 -> Maybe IncrementalProof
resolveProofPath thy lemmaName path =
  lookupLemma lemmaName thy >>= \lemma -> get lProof lemma `atPath` path
-- | Resolve a proof path for a side-local lemma of a diff theory.
resolveProofPathDiff :: ClosedDiffTheory -- ^ Theory to resolve in
                     -> Side             -- ^ Side of lemma
                     -> String           -- ^ Name of lemma
                     -> ProofPath        -- ^ Path to resolve
                     -> Maybe IncrementalProof
resolveProofPathDiff thy s lemmaName path =
  lookupLemmaDiff s lemmaName thy >>= \lemma -> get lProof lemma `atPath` path
-- | Resolve a proof path inside a diff lemma's diff proof.
resolveProofPathDiffLemma :: ClosedDiffTheory -- ^ Theory to resolve in
                          -> String           -- ^ Name of lemma
                          -> ProofPath        -- ^ Path to resolve
                          -> Maybe IncrementalDiffProof
resolveProofPathDiffLemma thy lemmaName path =
  lookupDiffLemma lemmaName thy >>= \lemma -> get lDiffProof lemma `atPathDiff` path
------------------------------------------------------------------------------
-- Moving to next/prev proof path
------------------------------------------------------------------------------
-- | Get 'next' theory path.
-- Fixed page order: help, message theory, rules, raw sources, refined
-- sources, then the lemmas' proof steps in proof order.
nextThyPath :: ClosedTheory -> TheoryPath -> TheoryPath
nextThyPath thy = go
  where
    go TheoryHelp = TheoryMessage
    go TheoryMessage = TheoryRules
    go TheoryRules = TheorySource RawSource 0 0
    go (TheorySource RawSource _ _) = TheorySource RefinedSource 0 0
    go (TheorySource RefinedSource _ _) = fromMaybe TheoryHelp firstLemma
    go (TheoryLemma lemma) = TheoryProof lemma []
    -- Advance within the current lemma's proof, fall through to the
    -- next lemma, and stay put at the very end.
    go (TheoryProof l p)
      | Just nextPath <- getNextPath l p = TheoryProof l nextPath
      | Just nextLemma <- getNextLemma l = TheoryProof nextLemma []
      | otherwise = TheoryProof l p
    go path@(TheoryMethod _ _ _) = path
    lemmas = map (\l -> (get lName l, l)) $ getLemmas thy
    firstLemma = flip TheoryProof [] . fst <$> listToMaybe lemmas
    -- Step to the proof path directly after 'path' in 'lemmaName'.
    getNextPath lemmaName path = do
      lemma <- lookupLemma lemmaName thy
      let paths = map fst $ getProofPaths $ get lProof lemma
      getNextElement (== path) paths
    getNextLemma lemmaName = getNextElement (== lemmaName) (map fst lemmas)
-- | Get 'next' diff theory path.
-- Fixed page order: help, diff rules, message theory (LHS/RHS x
-- plain/diff), rules, raw/refined sources, side lemmas (LHS then RHS),
-- then the diff lemmas.
nextDiffThyPath :: ClosedDiffTheory -> DiffTheoryPath -> DiffTheoryPath
nextDiffThyPath thy = go
  where
    go DiffTheoryHelp = DiffTheoryDiffRules
    go DiffTheoryDiffRules = DiffTheoryMessage LHS False
    go (DiffTheoryMessage LHS False) = DiffTheoryMessage RHS False
    go (DiffTheoryMessage RHS False) = DiffTheoryMessage LHS True
    go (DiffTheoryMessage LHS True) = DiffTheoryMessage RHS True
    go (DiffTheoryMessage RHS True) = (DiffTheoryRules LHS False)
    go (DiffTheoryRules LHS False) = DiffTheoryRules RHS False
    go (DiffTheoryRules RHS False) = DiffTheoryRules LHS True
    go (DiffTheoryRules LHS True) = DiffTheoryRules RHS True
    go (DiffTheoryRules RHS True) = DiffTheorySource LHS RawSource False 0 0
    go (DiffTheorySource LHS RawSource False _ _) = DiffTheorySource RHS RawSource False 0 0
    go (DiffTheorySource RHS RawSource False _ _) = DiffTheorySource LHS RawSource True 0 0
    go (DiffTheorySource LHS RawSource True _ _) = DiffTheorySource RHS RawSource True 0 0
    go (DiffTheorySource RHS RawSource True _ _) = DiffTheorySource LHS RefinedSource False 0 0
    go (DiffTheorySource LHS RefinedSource False _ _) = DiffTheorySource RHS RefinedSource False 0 0
    go (DiffTheorySource RHS RefinedSource False _ _) = DiffTheorySource LHS RefinedSource True 0 0
    go (DiffTheorySource LHS RefinedSource True _ _) = DiffTheorySource RHS RefinedSource True 0 0
    go (DiffTheorySource RHS RefinedSource True _ _) = fromMaybe DiffTheoryHelp firstLemma
    go (DiffTheoryLemma s lemma) = DiffTheoryProof s lemma []
    go (DiffTheoryDiffLemma lemma) = DiffTheoryDiffProof lemma []
    -- Within a side's lemma: advance the proof path, then the next
    -- lemma of that side, then cross from LHS to RHS, then to diff lemmas.
    go (DiffTheoryProof s l p)
      | Just nextPath <- getNextPath s l p = DiffTheoryProof s l nextPath
      | Just nextLemma <- getNextLemma s l = DiffTheoryProof s nextLemma []
      | s == LHS = case lemmas RHS of
          [] -> firstDiffLemma
          l':_ -> (DiffTheoryProof RHS (fst l') [])
      | s == RHS = firstDiffLemma
      | otherwise = DiffTheoryProof s l p
    go (DiffTheoryDiffProof l p)
      | Just nextPath <- getNextDiffPath l p = DiffTheoryDiffProof l nextPath
      | Just nextDiffLemma <- getNextDiffLemma l = DiffTheoryDiffProof nextDiffLemma []
      | otherwise = DiffTheoryDiffProof l p
    go path@(DiffTheoryMethod _ _ _ _) = path
    go path@(DiffTheoryDiffMethod _ _ _) = path
    firstDiffLemma = case getDiffLemmas thy of
      [] -> DiffTheoryHelp
      l:_ -> DiffTheoryDiffProof (get lDiffName l) []
    -- Lemmas of one side / all diff lemmas, keyed by name.
    lemmas s = map (\l -> (get lName l, l)) $ diffTheorySideLemmas s thy
    diffLemmas = map (\l -> (get lDiffName l, l)) $ diffTheoryDiffLemmas thy
    firstLemma = case lemmas LHS of
      [] -> case lemmas RHS of
        [] -> Nothing
        l:_ -> Just (DiffTheoryProof RHS (fst l) [])
      l:_ -> Just (DiffTheoryProof LHS (fst l) [])
    getNextPath s lemmaName path = do
      lemma <- lookupLemmaDiff s lemmaName thy
      let paths = map fst $ getProofPaths $ get lProof lemma
      getNextElement (== path) paths
    getNextDiffPath lemmaName path = do
      lemma <- lookupDiffLemma lemmaName thy
      let paths = map fst $ getDiffProofPaths $ get lDiffProof lemma
      getNextElement (== path) paths
    getNextLemma s lemmaName = getNextElement (== lemmaName) (map fst (lemmas s))
    getNextDiffLemma lemmaName = getNextElement (== lemmaName) (map fst (diffLemmas))
-- | Get 'prev' theory path.
-- Inverse traversal of 'nextThyPath'; help is a fixed point.
prevThyPath :: ClosedTheory -> TheoryPath -> TheoryPath
prevThyPath thy = go
  where
    go TheoryHelp = TheoryHelp
    go TheoryMessage = TheoryHelp
    go TheoryRules = TheoryMessage
    go (TheorySource RawSource _ _) = TheoryRules
    go (TheorySource RefinedSource _ _) = TheorySource RawSource 0 0
    go (TheoryLemma l)
      | Just prevLemma <- getPrevLemma l = TheoryProof prevLemma (lastPath prevLemma)
      | otherwise = TheorySource RefinedSource 0 0
    go (TheoryProof l p)
      | Just prevPath <- getPrevPath l p = TheoryProof l prevPath
      | Just prevLemma <- getPrevLemma l = TheoryProof prevLemma (lastPath prevLemma)
      | otherwise = TheorySource RefinedSource 0 0
    go path@(TheoryMethod _ _ _) = path
    lemmas = map (\l -> (get lName l, l)) $ getLemmas thy
    getPrevPath lemmaName path = do
      lemma <- lookupLemma lemmaName thy
      let paths = map fst $ getProofPaths $ get lProof lemma
      getPrevElement (== path) paths
    -- Last proof step of a lemma; 'fromJust' is safe only because the
    -- lemma name comes from this theory's own lemma list.
    lastPath lemmaName = last $ map fst $ getProofPaths $
      get lProof $ fromJust $ lookupLemma lemmaName thy
    getPrevLemma lemmaName = getPrevElement (== lemmaName) (map fst lemmas)
-- | Get 'prev' diff theory path.
-- Inverse traversal of 'nextDiffThyPath'; help is a fixed point.
prevDiffThyPath :: ClosedDiffTheory -> DiffTheoryPath -> DiffTheoryPath
prevDiffThyPath thy = go
  where
    go DiffTheoryHelp = DiffTheoryHelp
    go DiffTheoryDiffRules = DiffTheoryHelp
    go (DiffTheoryMessage LHS False) = DiffTheoryDiffRules
    go (DiffTheoryMessage RHS False) = DiffTheoryMessage LHS False
    go (DiffTheoryMessage LHS True) = DiffTheoryMessage RHS False
    go (DiffTheoryMessage RHS True) = DiffTheoryMessage LHS True
    go (DiffTheoryRules LHS False) = DiffTheoryMessage RHS True
    go (DiffTheoryRules RHS False) = DiffTheoryRules LHS False
    go (DiffTheoryRules LHS True) = DiffTheoryRules RHS False
    go (DiffTheoryRules RHS True) = DiffTheoryRules LHS True
    go (DiffTheorySource LHS RawSource False _ _) = DiffTheoryRules RHS True
    go (DiffTheorySource RHS RawSource False _ _) = DiffTheorySource LHS RawSource False 0 0
    go (DiffTheorySource LHS RawSource True _ _) = DiffTheorySource RHS RawSource False 0 0
    go (DiffTheorySource RHS RawSource True _ _) = DiffTheorySource LHS RawSource True 0 0
    go (DiffTheorySource LHS RefinedSource False _ _) = DiffTheorySource RHS RawSource True 0 0
    go (DiffTheorySource RHS RefinedSource False _ _) = DiffTheorySource LHS RefinedSource False 0 0
    go (DiffTheorySource LHS RefinedSource True _ _) = DiffTheorySource RHS RefinedSource False 0 0
    go (DiffTheorySource RHS RefinedSource True _ _) = DiffTheorySource LHS RefinedSource True 0 0
    go (DiffTheoryLemma s l)
      | Just prevLemma <- getPrevLemma s l = DiffTheoryProof s prevLemma (lastPath s prevLemma)
      | otherwise = DiffTheorySource RHS RefinedSource True 0 0
    go (DiffTheoryDiffLemma l)
      | Just prevLemma <- getPrevDiffLemma l = DiffTheoryDiffProof prevLemma (lastPathDiff prevLemma)
      | otherwise = lastLemmaRHS
    -- Within a side's lemma: step back in the proof, then the previous
    -- lemma of that side, then cross from RHS back to LHS.
    go (DiffTheoryProof s l p)
      | Just prevPath <- getPrevPath s l p = DiffTheoryProof s l prevPath
      | Just prevLemma <- getPrevLemma s l = DiffTheoryProof s prevLemma (lastPath s prevLemma)
      | s == RHS = lastLemmaLHS
      | otherwise = DiffTheorySource RHS RefinedSource True 0 0
    go (DiffTheoryDiffProof l p)
      | Just prevPath <- getPrevDiffPath l p = DiffTheoryDiffProof l prevPath
      | Just prevDiffLemma <- getPrevDiffLemma l = DiffTheoryDiffProof prevDiffLemma (lastPathDiff prevDiffLemma)
      | otherwise = lastLemmaRHS
    go path@(DiffTheoryMethod _ _ _ _) = path
    go path@(DiffTheoryDiffMethod _ _ _) = path
    lemmas s = map (\l -> (get lName l, l)) $ diffTheorySideLemmas s thy
    diffLemmas = map (\l -> (get lDiffName l, l)) $ diffTheoryDiffLemmas thy
    getPrevPath s lemmaName path = do
      lemma <- lookupLemmaDiff s lemmaName thy
      let paths = map fst $ getProofPaths $ get lProof lemma
      getPrevElement (== path) paths
    getPrevDiffPath lemmaName path = do
      lemma <- lookupDiffLemma lemmaName thy
      let paths = map fst $ getDiffProofPaths $ get lDiffProof lemma
      getPrevElement (== path) paths
    -- 'fromJust' is safe only because lemma names come from this
    -- theory's own lemma lists.
    lastPath s lemmaName = last $ map fst $ getProofPaths $
      get lProof $ fromJust $ lookupLemmaDiff s lemmaName thy
    lastPathDiff lemmaName = last $ map fst $ getDiffProofPaths $
      get lDiffProof $ fromJust $ lookupDiffLemma lemmaName thy
    getPrevLemma s lemmaName = getPrevElement (== lemmaName) (map fst (lemmas s))
    getPrevDiffLemma lemmaName = getPrevElement (== lemmaName) (map fst (diffLemmas))
    lastLemmaLHS = case lemmas LHS of
      [] -> DiffTheorySource RHS RefinedSource True 0 0
      l -> DiffTheoryProof LHS (fst (last l)) (lastPath LHS (fst (last l)))
    lastLemmaRHS = case lemmas RHS of
      [] -> lastLemmaLHS
      l -> DiffTheoryProof RHS (fst (last l)) (lastPath RHS (fst (last l)))
-- | Interesting proof methods that are not skipped by next/prev-smart:
-- only open ('Sorry') and finished ('Solved') steps count.
isInterestingMethod :: ProofMethod -> Bool
isInterestingMethod m = case m of
  Sorry _ -> True
  Solved  -> True
  _       -> False
-- | Interesting diff proof methods that are not skipped by
-- next/prev-smart: only open ('DiffSorry') and attack steps count.
isInterestingDiffMethod :: DiffProofMethod -> Bool
isInterestingDiffMethod m = case m of
  DiffSorry _ -> True
  DiffAttack  -> True
  _           -> False
-- Get 'next' smart theory path.
-- Like 'nextThyPath', but within a proof it skips ahead to the next
-- "interesting" step (see 'isInterestingMethod').
nextSmartThyPath :: ClosedTheory -> TheoryPath -> TheoryPath
nextSmartThyPath thy = go
  where
    go TheoryHelp = TheoryMessage
    go TheoryMessage = TheoryRules
    go TheoryRules = TheorySource RawSource 0 0
    go (TheorySource RawSource _ _) = TheorySource RefinedSource 0 0
    go (TheorySource RefinedSource _ _) = fromMaybe TheoryHelp firstLemma
    go (TheoryLemma lemma) = TheoryProof lemma []
    go (TheoryProof l p)
      | Just nextPath <- getNextPath l p = TheoryProof l nextPath
      | Just nextLemma <- getNextLemma l = TheoryProof nextLemma []
      | otherwise = TheoryProof l p
    go path@(TheoryMethod _ _ _) = path
    lemmas = map (\l -> (get lName l, l)) $ getLemmas thy
    firstLemma = flip TheoryProof [] . fst <$> listToMaybe lemmas
    -- First interesting step strictly after 'path'.  'tail' is safe:
    -- the case branch only runs when 'nextSteps' is non-empty.
    getNextPath lemmaName path = do
      lemma <- lookupLemma lemmaName thy
      let paths = getProofPaths $ get lProof lemma
      case dropWhile ((/= path) . fst) paths of
        [] -> Nothing
        nextSteps -> listToMaybe . map fst . filter (isInterestingMethod . snd) $ tail nextSteps
    getNextLemma lemmaName = getNextElement (== lemmaName) (map fst lemmas)
-- Get 'next' smart diff theory path.
-- Like 'nextDiffThyPath', but within a proof it skips ahead to the next
-- "interesting" step (see 'isInterestingMethod' / 'isInterestingDiffMethod').
nextSmartDiffThyPath :: ClosedDiffTheory -> DiffTheoryPath -> DiffTheoryPath
nextSmartDiffThyPath thy = go
  where
    go DiffTheoryHelp = DiffTheoryDiffRules
    go DiffTheoryDiffRules = DiffTheoryMessage LHS False
    go (DiffTheoryMessage LHS False) = DiffTheoryMessage RHS False
    go (DiffTheoryMessage RHS False) = DiffTheoryMessage LHS True
    go (DiffTheoryMessage LHS True) = DiffTheoryMessage RHS True
    go (DiffTheoryMessage RHS True) = (DiffTheoryRules LHS False)
    go (DiffTheoryRules LHS False) = DiffTheoryRules RHS False
    go (DiffTheoryRules RHS False) = DiffTheoryRules LHS True
    go (DiffTheoryRules LHS True) = DiffTheoryRules RHS True
    go (DiffTheoryRules RHS True) = DiffTheorySource LHS RawSource False 0 0
    go (DiffTheorySource LHS RawSource False _ _) = DiffTheorySource RHS RawSource False 0 0
    go (DiffTheorySource RHS RawSource False _ _) = DiffTheorySource LHS RawSource True 0 0
    go (DiffTheorySource LHS RawSource True _ _) = DiffTheorySource RHS RawSource True 0 0
    go (DiffTheorySource RHS RawSource True _ _) = DiffTheorySource LHS RefinedSource False 0 0
    go (DiffTheorySource LHS RefinedSource False _ _) = DiffTheorySource RHS RefinedSource False 0 0
    go (DiffTheorySource RHS RefinedSource False _ _) = DiffTheorySource LHS RefinedSource True 0 0
    go (DiffTheorySource LHS RefinedSource True _ _) = DiffTheorySource RHS RefinedSource True 0 0
    go (DiffTheorySource RHS RefinedSource True _ _) = fromMaybe DiffTheoryHelp firstLemma
    go (DiffTheoryLemma s lemma) = DiffTheoryProof s lemma []
    go (DiffTheoryDiffLemma lemma) = DiffTheoryDiffProof lemma []
    go (DiffTheoryProof s l p)
      | Just nextPath <- getNextPath s l p = DiffTheoryProof s l nextPath
      | Just nextLemma <- getNextLemma s l = DiffTheoryProof s nextLemma []
      | s == LHS = case lemmas RHS of
          [] -> firstDiffLemma
          l':_ -> (DiffTheoryProof RHS (fst l') [])
      | s == RHS = firstDiffLemma
      | otherwise = DiffTheoryProof s l p
    go (DiffTheoryDiffProof l p)
      | Just nextPath <- getNextDiffPath l p = DiffTheoryDiffProof l nextPath
      | Just nextLemma <- getNextDiffLemma l = DiffTheoryDiffProof nextLemma []
      | otherwise = DiffTheoryDiffProof l p
    go path@(DiffTheoryMethod _ _ _ _) = path
    go path@(DiffTheoryDiffMethod _ _ _) = path
    firstDiffLemma = case getDiffLemmas thy of
      [] -> DiffTheoryHelp
      l:_ -> DiffTheoryDiffProof (get lDiffName l) []
    lemmas s = map (\l -> (get lName l, l)) $ diffTheorySideLemmas s thy
    diffLemmas = map (\l -> (get lDiffName l, l)) $ diffTheoryDiffLemmas thy
    firstLemma = case lemmas LHS of
      [] -> case lemmas RHS of
        [] -> Nothing
        l:_ -> Just (DiffTheoryProof RHS (fst l) [])
      l:_ -> Just (DiffTheoryProof LHS (fst l) [])
    -- First interesting step strictly after 'path'.  'tail' is safe:
    -- the case branch only runs when the list is non-empty.
    getNextPath s lemmaName path = do
      lemma <- lookupLemmaDiff s lemmaName thy
      let paths = getProofPaths $ get lProof lemma
      case dropWhile ((/= path) . fst) paths of
        [] -> Nothing
        nextSteps -> listToMaybe . map fst . filter (isInterestingMethod . snd) $ tail nextSteps
    getNextDiffPath lemmaName path = do
      lemma <- lookupDiffLemma lemmaName thy
      let paths = getDiffProofPaths $ get lDiffProof lemma
      case dropWhile ((/= path) . fst) paths of
        [] -> Nothing
        nextSteps -> listToMaybe . map fst . filter (isInterestingDiffMethod . snd) $ tail nextSteps
    getNextLemma s lemmaName = getNextElement (== lemmaName) (map fst (lemmas s))
    getNextDiffLemma lemmaName = getNextElement (== lemmaName) (map fst (diffLemmas))
-- Get 'prev' smart theory path.
-- Like 'prevThyPath', but within a proof it jumps back to the previous
-- "interesting" step (see 'isInterestingMethod').
prevSmartThyPath :: ClosedTheory -> TheoryPath -> TheoryPath
prevSmartThyPath thy = go
  where
    go TheoryHelp = TheoryHelp
    go TheoryMessage = TheoryHelp
    go TheoryRules = TheoryMessage
    go (TheorySource RawSource _ _) = TheoryRules
    go (TheorySource RefinedSource _ _) = TheorySource RawSource 0 0
    go (TheoryLemma l)
      | Just prevLemma <- getPrevLemma l = TheoryProof prevLemma (lastPath prevLemma)
      | otherwise = TheorySource RefinedSource 0 0
    go (TheoryProof l p)
      | Just prevPath <- getPrevPath l p = TheoryProof l prevPath
      -- | Just firstPath <- getFirstPath l p = TheoryProof l firstPath
      | Just prevLemma <- getPrevLemma l = TheoryProof prevLemma (lastPath prevLemma)
      | otherwise = TheorySource RefinedSource 0 0
    go path@(TheoryMethod _ _ _) = path
    lemmas = map (\l -> (get lName l, l)) $ getLemmas thy
    {-
    getFirstPath lemmaName current = do
      lemma <- lookupLemma lemmaName thy
      let paths = map fst $ getProofPaths $ get lProof lemma
      if null paths || (head paths == current)
        then Nothing
        else Just $ head paths
    -}
    -- Last interesting step strictly before 'path'.
    getPrevPath lemmaName path = do
      lemma <- lookupLemma lemmaName thy
      let paths = getProofPaths $ get lProof lemma
      case filter (isInterestingMethod . snd) . takeWhile ((/= path) . fst) $ paths of
        [] -> Nothing
        prevSteps -> Just . fst . last $ prevSteps
    -- 'fromJust' is safe only because lemma names come from this
    -- theory's own lemma list.
    lastPath lemmaName = last $ map fst $ getProofPaths $
      get lProof $ fromJust $ lookupLemma lemmaName thy
    getPrevLemma lemmaName = getPrevElement (== lemmaName) (map fst lemmas)
-- Get 'prev' smart diff theory path.
-- Like 'prevDiffThyPath', but within a proof it jumps back to the
-- previous "interesting" step.
prevSmartDiffThyPath :: ClosedDiffTheory -> DiffTheoryPath -> DiffTheoryPath
prevSmartDiffThyPath thy = go
  where
    go DiffTheoryHelp = DiffTheoryHelp
    go DiffTheoryDiffRules = DiffTheoryHelp
    go (DiffTheoryMessage LHS False) = DiffTheoryDiffRules
    go (DiffTheoryMessage RHS False) = DiffTheoryMessage LHS False
    go (DiffTheoryMessage LHS True) = DiffTheoryMessage RHS False
    go (DiffTheoryMessage RHS True) = DiffTheoryMessage LHS True
    go (DiffTheoryRules LHS False) = DiffTheoryMessage RHS True
    go (DiffTheoryRules RHS False) = DiffTheoryRules LHS False
    go (DiffTheoryRules LHS True) = DiffTheoryRules RHS False
    go (DiffTheoryRules RHS True) = DiffTheoryRules LHS True
    go (DiffTheorySource LHS RawSource False _ _) = DiffTheoryRules RHS True
    go (DiffTheorySource RHS RawSource False _ _) = DiffTheorySource LHS RawSource False 0 0
    go (DiffTheorySource LHS RawSource True _ _) = DiffTheorySource RHS RawSource False 0 0
    go (DiffTheorySource RHS RawSource True _ _) = DiffTheorySource LHS RawSource True 0 0
    go (DiffTheorySource LHS RefinedSource False _ _) = DiffTheorySource RHS RawSource True 0 0
    go (DiffTheorySource RHS RefinedSource False _ _) = DiffTheorySource LHS RefinedSource False 0 0
    go (DiffTheorySource LHS RefinedSource True _ _) = DiffTheorySource RHS RefinedSource False 0 0
    go (DiffTheorySource RHS RefinedSource True _ _) = DiffTheorySource LHS RefinedSource True 0 0
    go (DiffTheoryLemma s l)
      | Just prevLemma <- getPrevLemma s l = DiffTheoryProof s prevLemma (lastPath s prevLemma)
      | otherwise = DiffTheorySource RHS RefinedSource True 0 0
    go (DiffTheoryDiffLemma l)
      | Just prevLemma <- getPrevDiffLemma l = DiffTheoryDiffProof prevLemma (lastPathDiff prevLemma)
      | otherwise = lastLemmaRHS
    go (DiffTheoryProof s l p)
      | Just prevPath <- getPrevPath s l p = DiffTheoryProof s l prevPath
      | Just prevLemma <- getPrevLemma s l = DiffTheoryProof s prevLemma (lastPath s prevLemma)
      | s == RHS = lastLemmaLHS
      | otherwise = DiffTheorySource RHS RefinedSource True 0 0
    go (DiffTheoryDiffProof l p)
      | Just prevPath <- getPrevPathDiff l p = DiffTheoryDiffProof l prevPath
      | Just prevDiffLemma <- getPrevDiffLemma l = DiffTheoryDiffProof prevDiffLemma (lastPathDiff prevDiffLemma)
      | otherwise = lastLemmaRHS
    go path@(DiffTheoryMethod _ _ _ _) = path
    go path@(DiffTheoryDiffMethod _ _ _) = path
    lemmas s = map (\l -> (get lName l, l)) $ diffTheorySideLemmas s thy
    diffLemmas = map (\l -> (get lDiffName l, l)) $ diffTheoryDiffLemmas thy
    {-
    getFirstPath lemmaName current = do
      lemma <- lookupLemma lemmaName thy
      let paths = map fst $ getProofPaths $ get lProof lemma
      if null paths || (head paths == current)
        then Nothing
        else Just $ head paths
    -}
    -- Last interesting step strictly before 'path'.
    getPrevPath s lemmaName path = do
      lemma <- lookupLemmaDiff s lemmaName thy
      let paths = getProofPaths $ get lProof lemma
      case filter (isInterestingMethod . snd) . takeWhile ((/= path) . fst) $ paths of
        [] -> Nothing
        prevSteps -> Just . fst . last $ prevSteps
    getPrevPathDiff lemmaName path = do
      lemma <- lookupDiffLemma lemmaName thy
      let paths = getDiffProofPaths $ get lDiffProof lemma
      case filter (isInterestingDiffMethod . snd) . takeWhile ((/= path) . fst) $ paths of
        [] -> Nothing
        prevSteps -> Just . fst . last $ prevSteps
    -- 'fromJust' is safe only because lemma names come from this
    -- theory's own lemma lists.
    lastPath s lemmaName = last $ map fst $ getProofPaths $
      get lProof $ fromJust $ lookupLemmaDiff s lemmaName thy
    lastPathDiff lemmaName = last $ map fst $ getDiffProofPaths $
      get lDiffProof $ fromJust $ lookupDiffLemma lemmaName thy
    getPrevLemma s lemmaName = getPrevElement (== lemmaName) (map fst (lemmas s))
    getPrevDiffLemma lemmaName = getPrevElement (== lemmaName) (map fst (diffLemmas))
    lastLemmaLHS = case lemmas LHS of
      [] -> DiffTheorySource RHS RefinedSource True 0 0
      l -> DiffTheoryProof LHS (fst (last l)) (lastPath LHS (fst (last l)))
    lastLemmaRHS = case lemmas RHS of
      [] -> lastLemmaLHS
      l -> DiffTheoryProof RHS (fst (last l)) (lastPath RHS (fst (last l)))
-- | Extract proof paths out of a proof, pre-order: the root (empty
-- path) first, then each child subtree's paths prefixed by its label.
getProofPaths :: LTree CaseName (ProofStep a) -> [([String], ProofMethod)]
getProofPaths proof = ([], psMethod (root proof)) : walk proof
  where
    walk node =
      [ step
      | (lbl, child) <- M.toList (children node)
      , step <- ([lbl], psMethod (root child)) : map (first (lbl:)) (walk child)
      ]
-- | Extract proof paths out of a diff proof, pre-order: the root
-- (empty path) first, then each child subtree's paths prefixed by its
-- label.
getDiffProofPaths :: LTree CaseName (DiffProofStep a) -> [([String], DiffProofMethod)]
getDiffProofPaths proof = ([], dpsMethod (root proof)) : walk proof
  where
    walk node =
      [ step
      | (lbl, child) <- M.toList (children node)
      , step <- ([lbl], dpsMethod (root child)) : map (first (lbl:)) (walk child)
      ]
-- | Get the element directly _after_ the first matching element of the
-- list ('Nothing' if there is no match or the match is last).
getNextElement :: (a -> Bool) -> [a] -> Maybe a
getNextElement f xs =
  case dropWhile (not . f) xs of
    (_match:next:_) -> Just next
    _ -> Nothing
-- | Get the element directly _before_ the first matching element of
-- the list.  The head is never itself a match target, so a match at
-- position 0 (and no match at all) both yield 'Nothing'.
getPrevElement :: (a -> Bool) -> [a] -> Maybe a
getPrevElement _ [] = Nothing
getPrevElement f (x0:rest) = scan x0 rest
  where
    scan _ [] = Nothing
    scan prev (y:ys)
      | f y = Just prev
      | otherwise = scan y ys
-- | Translate a proof status returned by 'annotateLemmaProof' to a
-- corresponding CSS class: superfluous when no system is attached,
-- good/bad for proven/disproven, and unchanged while undetermined.
markStatus :: HtmlDocument d => (Maybe System, Maybe Bool) -> d -> d
markStatus status = case status of
  (Nothing, _)          -> withTag "span" [("class","hl_superfluous")]
  (Just _, Just True)   -> withTag "span" [("class","hl_good")]
  (Just _, Just False)  -> withTag "span" [("class","hl_bad")]
  (Just _, Nothing)     -> id
-- | Translate a diff proof status returned by 'annotateLemmaProof' to
-- a corresponding CSS class (same scheme as 'markStatus').
markStatusDiff :: HtmlDocument d => (Maybe DiffSystem, Maybe Bool) -> d -> d
markStatusDiff status = case status of
  (Nothing, _)          -> withTag "span" [("class","hl_superfluous")]
  (Just _, Just True)   -> withTag "span" [("class","hl_good")]
  (Just _, Just False)  -> withTag "span" [("class","hl_bad")]
  (Just _, Nothing)     -> id
-- | Annotate a proof for pretty printing.
-- The boolean flag indicates that the given proof step's children
-- are (a) all annotated and (b) contain no sorry steps.
annotateLemmaProof :: Lemma IncrementalProof
                   -> Proof (Maybe System, Maybe Bool)
annotateLemmaProof lem =
    -- error (show (get lProof lem) ++ " - " ++ show prf)
    mapProofInfo (second interpret) prf
  where
    prf = annotateProof annotate $ get lProof lem
    -- Fold each step's status together with all of its children's
    -- statuses (mconcat over the ProofStatus monoid).
    annotate step cs =
        ( psInfo step
        , mconcat $ proofStepStatus step : incomplete ++ map snd cs
        )
      where
        -- A step without a constraint system is an unfinished step.
        incomplete = if isNothing (psInfo step) then [IncompleteProof] else []
    -- Map the aggregate status to Just True/False (good/bad) relative
    -- to the lemma's trace quantifier, or Nothing when undetermined.
    interpret status = case (get lTraceQuantifier lem, status) of
      (_, IncompleteProof) -> Nothing
      (_, UndeterminedProof) -> Nothing
      (AllTraces, TraceFound) -> Just False
      (AllTraces, CompleteProof) -> Just True
      (ExistsTrace, TraceFound) -> Just True
      (ExistsTrace, CompleteProof) -> Just False
-- | Annotate a diff proof for pretty printing.
-- The boolean flag indicates that the given proof step's children
-- are (a) all annotated and (b) contain no sorry steps.
annotateDiffLemmaProof :: DiffLemma IncrementalDiffProof
                       -> DiffProof (Maybe DiffSystem, Maybe Bool)
annotateDiffLemmaProof lem =
    mapDiffProofInfo (second interpret) prf
  where
    prf = annotateDiffProof annotate $ get lDiffProof lem
    -- Fold each step's status together with all of its children's
    -- statuses (mconcat over the ProofStatus monoid).
    annotate step cs =
        ( dpsInfo step
        , mconcat $ diffProofStepStatus step : incomplete ++ map snd cs
        )
      where
        -- A step without a constraint system is an unfinished step.
        incomplete = if isNothing (dpsInfo step) then [IncompleteProof] else []
    -- Diff lemmas have no trace quantifier: a found trace is always an
    -- attack (bad), a complete proof is always good.
    interpret status = case status of
      IncompleteProof -> Nothing
      UndeterminedProof -> Nothing
      TraceFound -> Just False
      CompleteProof -> Just True
| kelnage/tamarin-prover | src/Web/Theory.hs | gpl-3.0 | 90,915 | 0 | 21 | 29,450 | 21,259 | 10,603 | 10,656 | 1,359 | 30 |
{-# LANGUAGE DeriveFunctor #-}
module Lamdu.Data.Ops
( newHole, wrap
, replace, replaceWithHole, setToHole, lambdaWrap, redexWrap
, addListItem
, newPublicDefinition
, newDefinition, presentationModeOfName
, savePreJumpPosition, jumpBack
, newPane
, newClipboard
, makeNewTag, makeNewPublicTag
, isInfix
) where
import Control.Applicative ((<$>), (<*>), (<$))
import Control.Lens.Operators
import Control.Monad (when)
import Control.MonadA (MonadA)
import Data.Store.Guid (Guid)
import Data.Store.IRef (Tag)
import Data.Store.Transaction (Transaction, getP, setP, modP)
import Lamdu.CharClassification (operatorChars)
import Lamdu.Data.Anchors (PresentationMode(..))
import Lamdu.Data.Definition (Definition(..))
import Lamdu.Data.Expression.IRef (DefI)
import qualified Data.Store.IRef as IRef
import qualified Data.Store.Property as Property
import qualified Data.Store.Transaction as Transaction
import qualified Graphics.UI.Bottle.WidgetId as WidgetId
import qualified Lamdu.Data.Anchors as Anchors
import qualified Lamdu.Data.Definition as Definition
import qualified Lamdu.Data.Expression as Expr
import qualified Lamdu.Data.Expression.IRef as ExprIRef
import qualified Lamdu.Data.Expression.Lens as ExprLens
import qualified Lamdu.Data.Expression.Utils as ExprUtil
-- | Shorthand for the store 'Transaction' monad used throughout.
type T = Transaction
-- | Wrap an expression in an application of a fresh hole:
-- @e@ becomes @?f e@.  Returns the new application node.
wrap ::
  MonadA m =>
  ExprIRef.ExpressionProperty m ->
  T m (ExprIRef.ExpressionI (Tag m))
wrap exprP = do
  funcI <- newHole
  appI <- ExprIRef.newExprBody (ExprUtil.makeApply funcI (Property.value exprP))
  Property.set exprP appI
  return appI
-- | Allocate a fresh hole expression.
newHole :: MonadA m => T m (ExprIRef.ExpressionI (Tag m))
newHole = ExprIRef.newExprBody $ Expr.BodyLeaf Expr.Hole
-- | Overwrite the expression at the given property with another
-- expression, returning the replacement.
replace ::
  MonadA m =>
  ExprIRef.ExpressionProperty m ->
  ExprIRef.ExpressionI (Tag m) ->
  T m (ExprIRef.ExpressionI (Tag m))
replace exprP newExprI = newExprI <$ Property.set exprP newExprI
-- | Replace the expression at the given property with a fresh hole.
replaceWithHole :: MonadA m => ExprIRef.ExpressionProperty m -> T m (ExprIRef.ExpressionI (Tag m))
replaceWithHole exprP = newHole >>= replace exprP
-- | Turn the expression at the given property into a hole in place:
-- the IRef is kept and its body overwritten, so references stay valid.
setToHole :: MonadA m => ExprIRef.ExpressionProperty m -> T m (ExprIRef.ExpressionI (Tag m))
setToHole exprP = do
  ExprIRef.writeExprBody exprI (Expr.BodyLeaf Expr.Hole)
  return exprI
  where
    exprI = Property.value exprP
-- | Wrap an expression in a fresh lambda (with a hole parameter type):
-- @e@ becomes @\x:? -> e@.  Returns the new parameter and the lambda.
lambdaWrap
  :: MonadA m
  => ExprIRef.ExpressionProperty m
  -> T m (Guid, ExprIRef.ExpressionI (Tag m))
lambdaWrap exprP = do
  paramTypeI <- newHole
  (param, lambdaI) <- ExprIRef.newLambda paramTypeI (Property.value exprP)
  Property.set exprP lambdaI
  return (param, lambdaI)
-- | Wrap an expression in an applied fresh lambda (a redex):
-- @e@ becomes @(\x:? -> e) ?@.  Returns the new parameter and the
-- lambda node (not the outer application).
redexWrap
  :: MonadA m
  => ExprIRef.ExpressionProperty m
  -> T m (Guid, ExprIRef.ExpressionI (Tag m))
redexWrap exprP = do
  paramTypeI <- newHole
  (param, lambdaI) <- ExprIRef.newLambda paramTypeI (Property.value exprP)
  argI <- newHole
  appI <- ExprIRef.newExprBody (ExprUtil.makeApply lambdaI argI)
  Property.set exprP appI
  return (param, lambdaI)
-- | Prepend a fresh hole item to a list expression, using the
-- special-functions "cons" encoding: the list becomes
-- @cons {head: ?new, tail: old}@.  Returns the new list expression
-- and the new item (a hole).
addListItem ::
  MonadA m =>
  Anchors.SpecialFunctions (Tag m) ->
  ExprIRef.ExpressionProperty m ->
  T m (ExprIRef.ExpressionI (Tag m), ExprIRef.ExpressionI (Tag m))
addListItem specialFunctions exprP = do
  -- Reference to the special "cons" definition, applied to a hole
  -- (the type argument).
  consTempI <-
    ExprIRef.newExprBody $ ExprLens.bodyDefinitionRef # Anchors.sfCons specialFunctions
  consI <-
    ExprIRef.newExprBody . ExprUtil.makeApply consTempI =<< newHole
  newItemI <- newHole
  headTag <- ExprIRef.newExprBody $ ExprLens.bodyTag # Anchors.sfHeadTag specialFunctions
  tailTag <- ExprIRef.newExprBody $ ExprLens.bodyTag # Anchors.sfTailTag specialFunctions
  -- Record argument {head: newItem, tail: old list value}.
  argsI <-
    ExprIRef.newExprBody $ ExprLens.bodyKindedRecordFields Expr.Val #
    [ (headTag, newItemI)
    , (tailTag, Property.value exprP)
    ]
  newListI <- ExprIRef.newExprBody $ ExprUtil.makeApply consI argsI
  Property.set exprP newListI
  return (newListI, newItemI)
-- | Open a pane for the given definition unless one is already open.
newPane :: MonadA m => Anchors.CodeProps m -> DefI (Tag m) -> T m ()
newPane codeProps defI = do
  panes <- getP (Anchors.panes codeProps)
  when (defI `notElem` panes) $
    setP (Anchors.panes codeProps) (Anchors.makePane defI : panes)
-- | Push the current widget position onto the pre-jump stack,
-- keeping at most 20 entries.
savePreJumpPosition :: MonadA m => Anchors.CodeProps m -> WidgetId.Id -> T m ()
savePreJumpPosition codeProps pos =
  modP (Anchors.preJumps codeProps) (\jumps -> pos : take 19 jumps)
-- | Pop the pre-jump stack.  Returns 'Nothing' when the stack is
-- empty; otherwise an action that removes the top entry and yields it.
jumpBack :: MonadA m => Anchors.CodeProps m -> T m (Maybe (T m WidgetId.Id))
jumpBack codeProps = do
  preJumps <- getP (Anchors.preJumps codeProps)
  case preJumps of
    [] -> return Nothing
    (j:js) -> return . Just $ do
      setP (Anchors.preJumps codeProps) js
      return j
-- | A non-empty name consisting solely of operator characters is
-- rendered infix.
isInfix :: String -> Bool
isInfix [] = False
isInfix xs = all (`elem` operatorChars) xs
-- | Default presentation mode for a name: 'Infix' for operator names,
-- 'OO' otherwise.
presentationModeOfName :: String -> PresentationMode
presentationModeOfName name = if isInfix name then Infix else OO
-- | Store a definition body and associate its guid with the given
-- name and presentation mode.
newDefinition ::
  MonadA m => String -> PresentationMode ->
  ExprIRef.DefinitionI (Tag m) -> T m (DefI (Tag m))
newDefinition name presentationMode def = do
  defRef <- Transaction.newIRef def
  let defGuid = IRef.guid defRef
  setP (Anchors.assocNameRef defGuid) name
  setP (Anchors.assocPresentationMode defGuid) presentationMode
  return defRef
-- | Create a new definition (body and type both holes), choosing the
-- presentation mode from the name, and add it to the globals list.
newPublicDefinition ::
  MonadA m => Anchors.CodeProps m -> String -> T m (DefI (Tag m))
newPublicDefinition codeProps name = do
  defI <-
    newDefinition name (presentationModeOfName name) =<<
    (Definition . Definition.BodyExpression <$> newHole <*> newHole)
  modP (Anchors.globals codeProps) (defI :)
  return defI
-- | Wrap 'expr' in a fresh definition named @clipboard<n>@ (where n is the
-- current clipboard count) and prepend it to the clipboards list.
newClipboard ::
  MonadA m => Anchors.CodeProps m ->
  ExprIRef.ExpressionI (Tag m) ->
  T m (DefI (Tag m))
newClipboard codeProps expr = do
  len <- length <$> getP (Anchors.clipboards codeProps)
  def <- Definition (Definition.BodyExpression expr) <$> newHole
  defI <- newDefinition ("clipboard" ++ show len) OO def
  modP (Anchors.clipboards codeProps) (defI:)
  return defI
-- | Allocate a fresh tag guid and associate the given name with it.
makeNewTag :: MonadA m => String -> T m Guid
makeNewTag name = do
  tag <- Transaction.newKey
  -- '<$' returns the tag after performing the name association.
  tag <$ setP (Anchors.assocNameRef tag) name
-- | Create a named tag and additionally register it in the global tags list.
makeNewPublicTag :: MonadA m => Anchors.CodeProps m -> String -> T m Guid
makeNewPublicTag codeProps name = do
  tag <- makeNewTag name
  tag <$ modP (Anchors.tags codeProps) (tag :)
| Mathnerd314/lamdu | src/Lamdu/Data/Ops.hs | gpl-3.0 | 6,161 | 0 | 17 | 1,036 | 2,100 | 1,058 | 1,042 | 164 | 2 |
-- | Types used for web communication.
--
-- This module only re-exports: the extended explorer types and the
-- web-socket message types are made available under a single import.
module RSCoin.Explorer.WebTypes
       (
         module Exports
       ) where
import RSCoin.Explorer.Extended as Exports
import RSCoin.Explorer.Web.Sockets.Types as Exports
| input-output-hk/rscoin-haskell | src/RSCoin/Explorer/WebTypes.hs | gpl-3.0 | 245 | 0 | 4 | 76 | 32 | 24 | 8 | 5 | 0 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.GamesManagement.Quests.Reset
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Resets all player progress on the quest with the given ID for the
-- currently authenticated player. This method is only accessible to
-- whitelisted tester accounts for your application.
--
-- /See:/ <https://developers.google.com/games/services Google Play Game Services Management API Reference> for @gamesManagement.quests.reset@.
module Network.Google.Resource.GamesManagement.Quests.Reset
(
-- * REST Resource
QuestsResetResource
-- * Creating a Request
, questsReset
, QuestsReset
-- * Request Lenses
, qrQuestId
) where
import Network.Google.GamesManagement.Types
import Network.Google.Prelude
-- | A resource alias for @gamesManagement.quests.reset@ method which the
-- 'QuestsReset' request conforms to.
--
-- Route shape: @POST /games/v1management/quests/{questId}/reset@.
type QuestsResetResource =
     "games" :>
       "v1management" :>
         "quests" :>
           Capture "questId" Text :>
             "reset" :>
               QueryParam "alt" AltJSON :> Post '[JSON] ()
-- | Resets all player progress on the quest with the given ID for the
-- currently authenticated player. This method is only accessible to
-- whitelisted tester accounts for your application.
--
-- /See:/ 'questsReset' smart constructor.
-- (Auto-generated wrapper around the single @questId@ path parameter.)
newtype QuestsReset = QuestsReset'
    { _qrQuestId :: Text
    } deriving (Eq,Show,Data,Typeable,Generic)
-- | Creates a value of 'QuestsReset' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'qrQuestId'
questsReset
    :: Text -- ^ 'qrQuestId'
    -> QuestsReset
questsReset pQrQuestId_ =
    QuestsReset'
    { _qrQuestId = pQrQuestId_
    }
-- | The ID of the quest.
qrQuestId :: Lens' QuestsReset Text
qrQuestId
  = lens _qrQuestId (\ s a -> s{_qrQuestId = a})
-- Ties the request record to its REST route; the response body is ignored
-- (unit), and either of the two listed OAuth scopes authorizes the call.
instance GoogleRequest QuestsReset where
        type Rs QuestsReset = ()
        type Scopes QuestsReset =
             '["https://www.googleapis.com/auth/games",
               "https://www.googleapis.com/auth/plus.login"]
        requestClient QuestsReset'{..}
          = go _qrQuestId (Just AltJSON) gamesManagementService
          where go
                  = buildClient (Proxy :: Proxy QuestsResetResource)
                      mempty
| rueshyna/gogol | gogol-games-management/gen/Network/Google/Resource/GamesManagement/Quests/Reset.hs | mpl-2.0 | 3,001 | 0 | 13 | 674 | 314 | 194 | 120 | 49 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Module : Network.AWS.Route53Domains.UpdateDomainNameservers
-- Copyright : (c) 2013-2014 Brendan Hay <brendan.g.hay@gmail.com>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- | This operation replaces the current set of name servers for the domain with
-- the specified set of name servers. If you use Amazon Route 53 as your DNS
-- service, specify the four name servers in the delegation set for the hosted
-- zone for the domain.
--
-- If successful, this operation returns an operation ID that you can use to
-- track the progress and completion of the action. If the request is not
-- completed successfully, the domain registrant will be notified by email.
--
-- <http://docs.aws.amazon.com/Route53/latest/APIReference/api-UpdateDomainNameservers.html>
module Network.AWS.Route53Domains.UpdateDomainNameservers
(
-- * Request
UpdateDomainNameservers
-- ** Request constructor
, updateDomainNameservers
-- ** Request lenses
, udnDomainName
, udnFIAuthKey
, udnNameservers
-- * Response
, UpdateDomainNameserversResponse
-- ** Response constructor
, updateDomainNameserversResponse
-- ** Response lenses
, udnrOperationId
) where
import Network.AWS.Prelude
import Network.AWS.Request.JSON
import Network.AWS.Route53Domains.Types
import qualified GHC.Exts
-- Request payload for the Route53Domains UpdateDomainNameservers call.
data UpdateDomainNameservers = UpdateDomainNameservers
    { _udnDomainName  :: Text
    , _udnFIAuthKey   :: Maybe Text
    , _udnNameservers :: List "Nameservers" Nameserver
    } deriving (Eq, Read, Show)
-- | 'UpdateDomainNameservers' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'udnDomainName' @::@ 'Text'
--
-- * 'udnFIAuthKey' @::@ 'Maybe' 'Text'
--
-- * 'udnNameservers' @::@ ['Nameserver']
--
-- Only the domain name is required; auth key defaults to 'Nothing' and the
-- nameserver list to empty.
updateDomainNameservers :: Text -- ^ 'udnDomainName'
                        -> UpdateDomainNameservers
updateDomainNameservers p1 = UpdateDomainNameservers
    { _udnDomainName  = p1
    , _udnFIAuthKey   = Nothing
    , _udnNameservers = mempty
    }
-- | The name of a domain.
--
-- Type: String
--
-- Default: None
--
-- Constraints: The domain name can contain only the letters a through z, the
-- numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not
-- supported.
--
-- Required: Yes
udnDomainName :: Lens' UpdateDomainNameservers Text
udnDomainName = lens _udnDomainName (\s a -> s { _udnDomainName = a })
-- | The authorization key for .fi domains
udnFIAuthKey :: Lens' UpdateDomainNameservers (Maybe Text)
udnFIAuthKey = lens _udnFIAuthKey (\s a -> s { _udnFIAuthKey = a })
-- | A list of new name servers for the domain.
--
-- Type: Complex
--
-- Children: 'Name', 'GlueIps'
--
-- Required: Yes
-- ('_List' unwraps the named 'List' wrapper so the lens exposes a plain list.)
udnNameservers :: Lens' UpdateDomainNameservers [Nameserver]
udnNameservers = lens _udnNameservers (\s a -> s { _udnNameservers = a }) . _List
-- Response payload: just the operation identifier returned by the service.
newtype UpdateDomainNameserversResponse = UpdateDomainNameserversResponse
    { _udnrOperationId :: Text
    } deriving (Eq, Ord, Read, Show, Monoid, IsString)
-- | 'UpdateDomainNameserversResponse' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'udnrOperationId' @::@ 'Text'
--
updateDomainNameserversResponse :: Text -- ^ 'udnrOperationId'
                                -> UpdateDomainNameserversResponse
updateDomainNameserversResponse p1 = UpdateDomainNameserversResponse
    { _udnrOperationId = p1
    }
-- | Identifier for tracking the progress of the request. To use this ID to query
-- the operation status, use GetOperationDetail.
--
-- Type: String
--
-- Default: None
--
-- Constraints: Maximum 255 characters.
udnrOperationId :: Lens' UpdateDomainNameserversResponse Text
udnrOperationId = lens _udnrOperationId (\s a -> s { _udnrOperationId = a })
-- The service is JSON-over-POST at the root path; path/query are trivial.
instance ToPath UpdateDomainNameservers where
    toPath = const "/"
instance ToQuery UpdateDomainNameservers where
    toQuery = const mempty
instance ToHeaders UpdateDomainNameservers
-- Serialize the three request fields under their wire names.
instance ToJSON UpdateDomainNameservers where
    toJSON UpdateDomainNameservers{..} = object
        [ "DomainName"  .= _udnDomainName
        , "FIAuthKey"   .= _udnFIAuthKey
        , "Nameservers" .= _udnNameservers
        ]
instance AWSRequest UpdateDomainNameservers where
    type Sv UpdateDomainNameservers = Route53Domains
    type Rs UpdateDomainNameservers = UpdateDomainNameserversResponse
    request  = post "UpdateDomainNameservers"
    response = jsonResponse
-- The response carries a single "OperationId" key.
instance FromJSON UpdateDomainNameserversResponse where
    parseJSON = withObject "UpdateDomainNameserversResponse" $ \o -> UpdateDomainNameserversResponse
        <$> o .: "OperationId"
| dysinger/amazonka | amazonka-route53-domains/gen/Network/AWS/Route53Domains/UpdateDomainNameservers.hs | mpl-2.0 | 5,485 | 0 | 10 | 1,104 | 616 | 382 | 234 | 68 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Content.Accounttax.Patch
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Updates the tax settings of the account. This method can only be called
-- for accounts to which the managing account has access: either the
-- managing account itself or sub-accounts if the managing account is a
-- multi-client account. This method supports patch semantics.
--
-- /See:/ <https://developers.google.com/shopping-content Content API for Shopping Reference> for @content.accounttax.patch@.
module Network.Google.Resource.Content.Accounttax.Patch
(
-- * REST Resource
AccounttaxPatchResource
-- * Creating a Request
, accounttaxPatch
, AccounttaxPatch
-- * Request Lenses
, appMerchantId
, appPayload
, appAccountId
, appDryRun
) where
import Network.Google.Prelude
import Network.Google.ShoppingContent.Types
-- | A resource alias for @content.accounttax.patch@ method which the
-- 'AccounttaxPatch' request conforms to.
--
-- Route shape: @PATCH /content/v2/{merchantId}/accounttax/{accountId}@.
type AccounttaxPatchResource =
     "content" :>
       "v2" :>
         Capture "merchantId" (Textual Word64) :>
           "accounttax" :>
             Capture "accountId" (Textual Word64) :>
               QueryParam "dryRun" Bool :>
                 QueryParam "alt" AltJSON :>
                   ReqBody '[JSON] AccountTax :>
                     Patch '[JSON] AccountTax
-- | Updates the tax settings of the account. This method can only be called
-- for accounts to which the managing account has access: either the
-- managing account itself or sub-accounts if the managing account is a
-- multi-client account. This method supports patch semantics.
--
-- /See:/ 'accounttaxPatch' smart constructor.
data AccounttaxPatch = AccounttaxPatch'
    { _appMerchantId :: !(Textual Word64)
    , _appPayload :: !AccountTax
    , _appAccountId :: !(Textual Word64)
    , _appDryRun :: !(Maybe Bool)
    } deriving (Eq,Show,Data,Typeable,Generic)
-- | Creates a value of 'AccounttaxPatch' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'appMerchantId'
--
-- * 'appPayload'
--
-- * 'appAccountId'
--
-- * 'appDryRun'
-- ('_Coerce' wraps the raw Word64 ids into their 'Textual' wire form.)
accounttaxPatch
    :: Word64 -- ^ 'appMerchantId'
    -> AccountTax -- ^ 'appPayload'
    -> Word64 -- ^ 'appAccountId'
    -> AccounttaxPatch
accounttaxPatch pAppMerchantId_ pAppPayload_ pAppAccountId_ =
    AccounttaxPatch'
    { _appMerchantId = _Coerce # pAppMerchantId_
    , _appPayload = pAppPayload_
    , _appAccountId = _Coerce # pAppAccountId_
    , _appDryRun = Nothing
    }
-- | The ID of the managing account.
-- (Composes with '_Coerce' so callers work with plain 'Word64'.)
appMerchantId :: Lens' AccounttaxPatch Word64
appMerchantId
  = lens _appMerchantId
      (\ s a -> s{_appMerchantId = a})
      . _Coerce
-- | Multipart request metadata.
appPayload :: Lens' AccounttaxPatch AccountTax
appPayload
  = lens _appPayload (\ s a -> s{_appPayload = a})
-- | The ID of the account for which to get\/update account tax settings.
appAccountId :: Lens' AccounttaxPatch Word64
appAccountId
  = lens _appAccountId (\ s a -> s{_appAccountId = a})
      . _Coerce
-- | Flag to run the request in dry-run mode.
appDryRun :: Lens' AccounttaxPatch (Maybe Bool)
appDryRun
  = lens _appDryRun (\ s a -> s{_appDryRun = a})
-- Ties the request record to its REST route; the updated 'AccountTax' is
-- returned, and the content scope authorizes the call.
instance GoogleRequest AccounttaxPatch where
        type Rs AccounttaxPatch = AccountTax
        type Scopes AccounttaxPatch =
             '["https://www.googleapis.com/auth/content"]
        requestClient AccounttaxPatch'{..}
          = go _appMerchantId _appAccountId _appDryRun
              (Just AltJSON)
              _appPayload
              shoppingContentService
          where go
                  = buildClient
                      (Proxy :: Proxy AccounttaxPatchResource)
                      mempty
| rueshyna/gogol | gogol-shopping-content/gen/Network/Google/Resource/Content/Accounttax/Patch.hs | mpl-2.0 | 4,480 | 0 | 15 | 1,045 | 583 | 344 | 239 | 86 | 1 |
--
-- Copyright 2017-2018 Azad Bolour
-- Licensed under GNU Affero General Public License v3.0 -
-- https://github.com/azadbolour/boardgame/blob/master/LICENSE.md
--
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE DisambiguateRecordFields #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
module BoardGame.Server.Domain.CrossWordFinder (
findStripCrossWords
, findCrossPlays
, findCrossPlay
) where
import Data.Maybe (fromJust, isNothing, catMaybes)
import Data.List (transpose, find)
import qualified Bolour.Language.Util.WordUtil as WordUtil
import qualified Bolour.Plane.Domain.Axis as Axis
import qualified Bolour.Plane.Domain.Point as Point
import Bolour.Plane.Domain.Axis (Axis)
import Bolour.Plane.Domain.Point (Point, Point(Point))
import qualified BoardGame.Common.Domain.PiecePoint
import BoardGame.Common.Domain.PiecePoint (PiecePoint, PiecePoint(PiecePoint))
import qualified BoardGame.Common.Domain.Piece as Piece
import BoardGame.Common.Domain.PlayPiece (PlayPiece, MoveInfo)
import qualified BoardGame.Common.Domain.PlayPiece as PlayPiece
import BoardGame.Server.Domain.Board (Board)
import qualified BoardGame.Server.Domain.Board as Board
import BoardGame.Server.Domain.Strip (Strip, Strip(Strip))
import qualified BoardGame.Server.Domain.Strip as Strip
import qualified Data.ByteString.Char8 as BS
-- Shorthands for the two scan directions.
-- NOTE(review): these top-level bindings lack type signatures and appear
-- unused in this module (call sites use Axis.forward / Axis.backward
-- directly) -- candidates for removal; confirm before deleting.
forward = Axis.forward
backward = Axis.backward
-- | For a word played along a strip, collect the crossing words (length > 1)
-- that the play would form on the perpendicular axis, one per blank slot of
-- the strip that the word fills.
findStripCrossWords :: Board -> Strip -> String -> [String]
findStripCrossWords board (strip @ Strip {axis, content}) word =
  let range = [0 .. length word - 1]
      -- Only offsets where the strip is blank receive a new letter.
      crossingIndices = filter (\i -> WordUtil.isBlankChar $ content !! i) range
      calcCrossing :: Int -> Maybe String = \i ->
        let point = Strip.pointAtOffset strip i
            playedChar = word !! i
        in findSurroundingWord board point playedChar (Axis.crossAxis axis)
      crossingStrings = catMaybes (calcCrossing <$> crossingIndices)
      -- Single letters are not words; keep only genuine crossings.
      crossingWords = filter (\w -> length w > 1) crossingStrings
  in crossingWords
-- | The word formed along 'axis' around 'point' if 'letter' were placed
-- there; 'Nothing' when the point has no neighbors on that axis.
findSurroundingWord :: Board -> Point -> Char -> Axis -> Maybe String
findSurroundingWord board point letter axis =
  let play = findCrossPlay board point letter axis
      -- Project each MoveInfo triple down to its character.
  in ((\(char, _, _) -> char) <$>) <$> play
-- | Find the surrounding cross play to a given move (provided as
-- the point and movingLetter parameters).
--
-- Note that the only moving piece in a cross play is the one
-- at the given crossing point.
-- Note also that the moving piece has yet to be placed on the board.
findCrossPlay :: Board -> Point -> Char -> Axis -> Maybe [MoveInfo]
findCrossPlay board point movingLetter crossAxis =
  let crossingMoveInfo = (movingLetter, point, True)
      -- Contiguous occupied neighbors in both directions along the cross axis.
      forthNeighbors = Board.lineNeighbors board point crossAxis Axis.forward
      backNeighbors = Board.lineNeighbors board point crossAxis Axis.backward
      moveInfo neighbor =
        let PiecePoint {piece, point} = neighbor
        in (Piece.value piece, point, False)
      -- No neighbors on either side means no cross word is formed.
  in if null backNeighbors && null forthNeighbors then Nothing
     else Just $ (moveInfo <$> backNeighbors) ++ [crossingMoveInfo] ++ (moveInfo <$> forthNeighbors)
-- | Not used for now but is needed when scores of cross plays figure in the total score.
findCrossPlays :: Board -> [PlayPiece] -> [[MoveInfo]]
findCrossPlays board playPieces =
  let -- TODO. Internal error if fromJust fails.
      strip = fromJust $ Board.stripOfPlay board playPieces
      word = PlayPiece.playPiecesToWord playPieces
  in findCrossPlays' board strip word
-- | Worker for 'findCrossPlays': one cross play (if any) per blank slot of
-- the strip filled by the word.  Mirrors 'findStripCrossWords' but keeps the
-- full 'MoveInfo' instead of just the characters.
findCrossPlays' :: Board -> Strip -> String -> [[MoveInfo]]
findCrossPlays' board (strip @ Strip {axis, content}) word =
  let range = [0 .. length word - 1]
      crossingIndices = filter (\i -> WordUtil.isBlankChar $ content !! i) range
      calcCrossing :: Int -> Maybe [MoveInfo] = \i ->
        let point = Strip.pointAtOffset strip i
            playedChar = word !! i
        in findCrossPlay board point playedChar (Axis.crossAxis axis)
      crossingPlays = calcCrossing <$> crossingIndices
  in catMaybes crossingPlays
| azadbolour/boardgame | haskell-server/src/BoardGame/Server/Domain/CrossWordFinder.hs | agpl-3.0 | 4,063 | 2 | 16 | 704 | 998 | 569 | 429 | 69 | 2 |
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Test.SwiftNav.SBP.Encoding
( tests
) where
import BasicPrelude
import Data.ByteString as BS
import qualified Data.Aeson as A
import SwiftNav.SBP.Encoding ()
import Test.QuickCheck
import Test.Tasty
import Test.Tasty.HUnit
import Test.Tasty.QuickCheck as QC
-- Orphan 'Arbitrary' for strict ByteString: generate and shrink via the
-- underlying Word8 list (orphan warning suppressed at the top of the module).
instance Arbitrary BS.ByteString where
  arbitrary = BS.pack <$> arbitrary
  shrink xs = BS.pack <$> shrink (BS.unpack xs)
-- Orphan 'CoArbitrary' via the unpacked Word8 list.
instance CoArbitrary BS.ByteString where
  coarbitrary = coarbitrary . BS.unpack
testParse :: TestTree
testParse =
  testGroup "Empty Test"
  [ testCase "Empty data" $ do
      -- NOTE(review): the payload "dddd" is not empty, and the round trip
      -- decodes to a singleton list -- presumably the SBP ToJSON instance
      -- wraps the bytes in a JSON array; confirm against
      -- SwiftNav.SBP.Encoding and consider renaming the test labels.
      (A.decode . A.encode $ ("dddd" :: BS.ByteString)) @?= Just ["dddd" :: String]
  ]
-- Round trip through the Aeson instances must reproduce the input
-- (again wrapped in a singleton list).
testRoundtrip :: TestTree
testRoundtrip = QC.testProperty "Aeson" prop
  where prop ws = (A.decode . A.encode $ (ws :: BS.ByteString)) === Just [ws]
tests :: TestTree
tests = testGroup "Roundtrip JSON serialization"
  [ testRoundtrip
  , testParse
  ]
| paparazzi/libsbp | haskell/test/Test/SwiftNav/SBP/Encoding.hs | lgpl-3.0 | 977 | 0 | 14 | 190 | 277 | 157 | 120 | 28 | 1 |
-- mod from https://github.com/8c6794b6/haskell-sc-scratch/blob/master/Scratch/FP/PFDS/ElimAmort/Scheduled.hs
module Scheduled where
-- | A schedule: the suspended (unevaluated) tails of pending merges,
-- executed incrementally to amortize the merge cost (Okasaki's scheduled
-- bottom-up merge sort).
newtype Schedule a = Schedule [[a]]
-- | Element ordering predicate.
type Less a = a -> a -> Bool
-- | A sortable collection: the ordering, the element count, and one sorted
-- segment (with its schedule) per set bit of 'size'.
data Sortable a = Sortable
  { less :: Less a
  , size :: Int
  , segments :: [([a],Schedule a)] }
-- | Standard stable merge of two lists ordered by the given predicate:
-- when the predicate accepts (x, y), x is taken first.
merge :: Less a -> [a] -> [a] -> [a]
merge f = go
  where
    go [] ys = ys
    go xs [] = xs
    go xs@(x:xs') ys@(y:ys')
      | f x y     = x : go xs' ys
      | otherwise = y : go xs ys'
-- | Execute one step of a schedule: force the head of the first non-empty
-- suspended merge (via 'seq'), dropping exhausted merges along the way.
exec1 :: Schedule a -> Schedule a
exec1 (Schedule xs) = Schedule (exec1' xs) where
  exec1' ys = case ys of
    [] -> []
    []:sched -> exec1' sched
    (y:ys'):sched -> y `seq` ys' : sched
-- | Run two schedule steps for every segment (enough to keep all pending
-- merges ahead of future demand).
exec2PerSeg :: [([a], Schedule a)] -> [([a], Schedule a)]
exec2PerSeg = map step
  where
    step (seg, sched) = (seg, exec1 (exec1 sched))
-- | An empty sortable collection under the given ordering.
new :: Less a -> Sortable a
new f = Sortable f 0 []
-- | Insert an element.  Segments behave like binary addition on 'size':
-- carries merge equal-sized segments, and each new segment records the
-- reversed merge trace as its schedule; two steps per segment are then
-- executed so the suspended merges stay paid for.
add :: a -> Sortable a -> Sortable a
add x (Sortable f sz segs) =
  let addSeg xs sgs z (Schedule rsched)
        | z `mod` 2 == 0 = (xs, Schedule (reverse (xs:rsched))) : sgs
        | otherwise =
          -- Odd bit: a same-sized segment exists; merge with it and carry.
          let (xs', _) : sgs' = sgs
          in addSeg (merge f xs xs') sgs' (z `div` 2) (Schedule (xs:rsched))
      segs' = addSeg [x] segs sz (Schedule [])
  in Sortable f (succ sz) (exec2PerSeg segs')
-- | Produce the fully sorted list by folding all segments together with
-- 'merge' (schedules are ignored; any remaining suspensions are forced).
sort :: Sortable a -> [a]
sort (Sortable f _ segs) = go [] segs
  where
    go acc [] = acc
    go acc ((seg, _) : rest) = go (merge f acc seg) rest
| prt2121/haskell-practice | okasaki/src/Scheduled.hs | apache-2.0 | 1,497 | 0 | 16 | 400 | 776 | 405 | 371 | 40 | 3 |
-- |
-- Stability: stable
--
-- Hspec is a testing framework for Haskell.
--
-- This is the library reference for Hspec.
-- The <http://hspec.github.io/ User's Manual> contains more in-depth
-- documentation.
module Test.Hspec (
-- * Types
Spec
, SpecWith
, Arg
, Example
-- * Setting expectations
, module Test.Hspec.Expectations
-- * Defining a spec
, describe
, context
, it
, specify
, example
, pending
, pendingWith
, parallel
, runIO
-- * Hooks
, ActionWith
, before
, before_
, beforeWith
, beforeAll
, after
, after_
, afterAll
, afterAll_
, around
, around_
, aroundWith
-- * Running a spec
, hspec
) where
import Test.Hspec.Core.Spec
import Test.Hspec.Core.Hooks
import Test.Hspec.Runner
import Test.Hspec.Expectations
-- | @example@ is a type restricted version of `id`. It can be used to get better
-- error messages on type mismatches.
--
-- Compare e.g.
--
-- > it "exposes some behavior" $ example $ do
-- >   putStrLn
--
-- with
--
-- > it "exposes some behavior" $ do
-- >   putStrLn
example :: Expectation -> Expectation
example = id
-- | @context@ is an alias for `describe`, conventionally used for nested
-- groups describing a particular circumstance.
context :: String -> SpecWith a -> SpecWith a
context = describe
-- | @specify@ is an alias for `it`.
specify :: Example a => String -> a -> SpecWith (Arg a)
specify = it
| holoed/Junior | lib/hspec-2.1.5/src/Test/Hspec.hs | apache-2.0 | 1,318 | 0 | 10 | 282 | 218 | 144 | 74 | 38 | 1 |
module Data.Integrated.Partition where
import qualified Data.List as L
import Data.Integrated.TestModule (Map)
import qualified Data.Map as M
-- RETRO:
-- I'm curious if a OTS data structure is available to otherwise represent/enforce this
-- | Three-way partition of test modules: unchanged, displacing existing
-- entries, and newly added.
data Partition = Partition {
    equal :: Map, displacing :: Map, added :: Map
  }
-- NOTE: the label/field pairing below is boilerplate; a templated solution
-- could generate it, but it is kept explicit for clarity.
instance Show (Partition) where
  show p =
      L.intercalate "\n" ("partition:\n" : entries)
    where
      entries = map render labeled
      labeled = [("equal", equal p), ("displacing", displacing p), ("added", added p)]
      render (label, m) = label ++ " : " ++ show (M.toList m)
| jfeltz/tasty-integrate | src/Data/Integrated/Partition.hs | bsd-2-clause | 681 | 0 | 15 | 145 | 174 | 103 | 71 | 13 | 0 |
{-# LANGUAGE GADTs, OverloadedStrings, TypeOperators #-}
module Main where
import Control.Natural ((:~>), wrapNT)
import Control.Remote.Monad.JSON
import Control.Remote.Monad.JSON.Router(transport,router,Call(..),methodNotFound)
import Data.Aeson
import Data.Text(Text)
-- Our small DSL
-- | Fire-and-forget JSON-RPC notification "say" with one string argument.
say :: Text -> RPC ()
say msg = notification "say" (List [String msg])
-- | JSON-RPC method "temperature" with no arguments, yielding an Int.
temperature :: RPC Int
temperature = method "temperature" None
-- Our remote program
-- | Run two notifications and one method call over a weak (one call per
-- round trip) session against the simulated server, then print the result.
main :: IO ()
main = do
        let s = weakSession network
        t <- send s $ do
                say "Hello, "
                say "World!"
                temperature
        print t
-- Simulate the JSON-RPC server
-- | Server side: route incoming calls to 'remote'; unknown methods answer
-- with the standard method-not-found error.
network :: SendAPI :~> IO
network = transport $ router sequence $ wrapNT remote
  where
        remote :: Call a -> IO a
        remote (CallMethod "temperature" _) = return $ Number 42
        remote (CallNotification "say" (List [String msg])) = print msg
        remote _ = methodNotFound
| ku-fpg/remote-json | front/Main.hs | bsd-3-clause | 989 | 0 | 13 | 267 | 297 | 155 | 142 | 25 | 3 |
module Main where
import Pretty
-- | Filter stdin to stdout through the Haskell pretty-printer.
main :: IO ()
main = interact prettyHS
| Zane-XY/haskell-pretty | src/Main.hs | bsd-3-clause | 72 | 0 | 6 | 14 | 25 | 14 | 11 | 4 | 1 |
module ChessTools.Test.Board
where
import Control.Applicative ((<$>), (<*>))
import Control.Monad (join, liftM)
import Data.List (group, sort)
import Data.Maybe (fromJust)
import Test.QuickCheck
import ChessTools.Board
import ChessTools.Board.Internal
import ChessTools.Test.Utils
-- | For some of the more complex tests (there's at least one function that is
-- O(n^4), for example), it's more feasible to only generate small realistic
-- board sizes. An upper bound of 11 by 11 is arbitrarily used here.
smallBoardGen :: Gen BoardSize
smallBoardGen = sized $ \n ->
    resize (min n 11) boardSizeGen
-- | Generate a square guaranteed to be off the board: at least one of its
-- coordinates lies outside [0, h) x [0, v).
genBadSquare :: BoardSize -> Gen Square
genBadSquare (BoardSize h v _) = oneof [badX, badY]
    where badX = do
            sx <- oneof [choose (-5, -1), choose (h, h + 5)]
            sy <- choose (-5, v + 5)
            return $ Square (sx, sy)
          badY = do
            sx <- choose (-5, h + 5)
            sy <- oneof [choose (-5, -1), choose (v, v + 5)]
            return $ Square (sx, sy)
-- | Generate two independent valid squares on the same board.
genTwoSquares :: BoardSize -> Gen (Square, Square)
genTwoSquares bs = do
    sqA <- genSquare bs
    sqB <- genSquare bs
    return (sqA, sqB)
-- | A board together with one valid square on it.
boardAndSquareGen :: Gen (BoardSize, Square)
boardAndSquareGen = do
    bs <- boardSizeGen
    sq <- genSquare bs
    return (bs, sq)
-- | A board together with two independent valid squares on it.
boardAndTwoSquareGen :: Gen (BoardSize, Square, Square)
boardAndTwoSquareGen = do
    board <- boardSizeGen
    sqA <- genSquare board
    sqB <- genSquare board
    return (board, sqA, sqB)
-- | A board together with an off-board square (for error-path tests).
boardAndBadSquareGen :: Gen (BoardSize, Square)
boardAndBadSquareGen = do
    bs <- boardSizeGen
    sq <- genBadSquare bs
    return (bs, sq)
-- XXX: It's a little annoying that this is precisely how squareToIndex is
-- implemented, so it's not really verifying the result of that conversion by
-- different means.
boardAndIndexGen :: Gen (BoardSize, BIndex)
boardAndIndexGen = do
    bs <- boardSizeGen
    idx <- genIndex bs
    return (bs, idx)
-- | A board together with an out-of-range index (for error-path tests).
boardAndBadIndexGen :: Gen (BoardSize, BIndex)
boardAndBadIndexGen = do
    bs <- boardSizeGen
    idx <- genBadIndex bs
    return (bs, idx)
-- The squareToIndex and indexToSquare functions should be inverses of each
-- other. That is:
--     index -> square -> index should be the identity
--     square -> index -> square should be the identity
-- (Both conversions return Maybe, hence the 'join' of the lifted result.)
prop_indexToSquareInverse :: Property
prop_indexToSquareInverse = forAll boardAndIndexGen $ \(b, idx) ->
    join (squareToIndex b `liftM` indexToSquare b idx) == Just idx
prop_squareToIndexInverse :: Property
prop_squareToIndexInverse = forAll boardAndSquareGen $ \(b, sq) ->
    join (indexToSquare b `liftM` squareToIndex b sq) == Just sq
-- squareToIndex and indexToSquare should handle bad input appropriately.
prop_errorSquareToIndex :: Property
prop_errorSquareToIndex = forAll boardAndBadSquareGen $ \(b, sq) ->
    squareToIndex b sq == Nothing
prop_errorIndexToSquare :: Property
prop_errorIndexToSquare = forAll boardAndBadIndexGen $ \(b, idx) ->
    indexToSquare b idx == Nothing
-- As squares move from lower left ("a1" in western chess) to upper right (h8),
-- the index into the lookup table should increase.
-- (Equivalently: square ordering and index ordering agree.)
prop_indexIncreasesWithSquare :: Property
prop_indexIncreasesWithSquare = forAll boardAndTwoSquareGen $ \(b, s1, s2) ->
    let idx1 = squareToIndex b s1
        idx2 = squareToIndex b s2
    in s1 `compare` s2 == idx1 `compare` idx2
-- The board array size should be computed correctly (this is the
-- representation of the board of pieces, not a lookup array, which is smaller).
prop_boardArraySize :: Property
prop_boardArraySize = forAll boardSizeGen $ \b ->
    let BoardSize h v vbuf = b
        expected = h * v + v * (h - 1) + 2 * vbuf * (2 * h - 1)
    in boardArraySize b == expected
-- The list returned from repIndexList should actually be representative. That
-- is, it should contain as many values as the size of the lookup array and all
-- of the distance values in it should be unique.
-- (Uses the small board generator: this check is expensive.)
prop_repIndexListRepresents :: Property
prop_repIndexListRepresents = forAll smallBoardGen $ \bs ->
    let cl@(CL xs) = repIndexList bs
        (l, u) = lookupBounds cl
    in length xs == fromLI u - fromLI l + 1 &&
       (length . group . sort $ map fst xs) == length xs
-- Check that file, rank and square distance lookups are evaluated correctly.
-- For speed purposes, we check each of these against two fixed board sizes
-- (one square and one not). This avoids having to continually regenerate the
-- representative index list.
board1, board2 :: BoardSize
board1 = BoardSize 8 8 2
board2 = BoardSize 8 9 2
repList1, repList2 :: CoveringIndexList
repList1 = repIndexList board1
repList2 = repIndexList board2
-- Precomputed lookup tables for both boards.
fTable1, fTable2, rTable1, rTable2, sTable1, sTable2 :: LookupTable
fTable1 = fileTable repList1
fTable2 = fileTable repList2
rTable1 = rankTable repList1
rTable2 = rankTable repList2
sTable1 = squareTable repList1
sTable2 = squareTable repList2
-- Reference implementations of the three distance notions, computed
-- directly from coordinates.
type SquareCmpFunc = Square -> Square -> Int
fileCheckFunc, rankCheckFunc, squareCheckFunc :: SquareCmpFunc
fileCheckFunc (Square s1) (Square s2) = abs $ fst s1 - fst s2
rankCheckFunc (Square s1) (Square s2) = abs $ snd s1 - snd s2
squareCheckFunc sq1 sq2 = max (fileCheckFunc sq1 sq2) (rankCheckFunc sq1 sq2)
-- | Table lookup must agree with the reference comparison function for any
-- pair of squares on the board.  fromJust is safe: the generated squares
-- are on the board by construction.
checkLookup :: LookupTable -> SquareCmpFunc -> BoardSize -> Property
checkLookup lt cmp b = forAll (genTwoSquares b) $ \(sq1, sq2) ->
    let idx1 = fromJust $ squareToIndex b sq1
        idx2 = fromJust $ squareToIndex b sq2
    in fetch lt idx1 idx2 == cmp sq1 sq2
prop_checkFileDistance1 :: Property
prop_checkFileDistance1 = checkLookup fTable1 fileCheckFunc board1
prop_checkFileDistance2 :: Property
prop_checkFileDistance2 = checkLookup fTable2 fileCheckFunc board2
prop_checkRankDistance1 :: Property
prop_checkRankDistance1 = checkLookup rTable1 rankCheckFunc board1
prop_checkRankDistance2 :: Property
prop_checkRankDistance2 = checkLookup rTable2 rankCheckFunc board2
prop_checkSquareDistance1 :: Property
prop_checkSquareDistance1 = checkLookup sTable1 squareCheckFunc board1
prop_checkSquareDistance2 :: Property
prop_checkSquareDistance2 = checkLookup sTable2 squareCheckFunc board2
| malcolmt/chess-tools | src/ChessTools/Test/Board.hs | bsd-3-clause | 6,052 | 0 | 16 | 1,156 | 1,562 | 829 | 733 | 113 | 1 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_HADDOCK hide #-}
module Network.Xmpp.Concurrent.IQ where
import Control.Applicative ((<$>))
import Control.Concurrent (forkIO)
import Control.Concurrent.STM
import Control.Concurrent.Thread.Delay (delay)
import Control.Monad
import Control.Monad.Error
import Control.Monad.Trans
import qualified Data.Map as Map
import Data.Maybe
import Data.Text (Text)
import Data.XML.Pickle
import Data.XML.Types
import Lens.Family2 (toListOf, (&), (^.))
import Network.Xmpp.Concurrent.Basic
import Network.Xmpp.Concurrent.Types
import Network.Xmpp.Lens
import Network.Xmpp.Stanza
import Network.Xmpp.Types
import System.Log.Logger
-- | Sends an IQ, returns an STM action that returns the first inbound IQ with a
-- matching ID that has type @result@ or @error@ or Nothing if the timeout was
-- reached.
--
-- When sending the action fails, an XmppFailure is returned.
sendIQ :: Maybe Integer -- ^ Timeout . When the timeout is reached the response
                        -- TMVar will be filled with 'IQResponseTimeout' and the
                        -- id is removed from the list of IQ handlers. 'Nothing'
                        -- deactivates the timeout
       -> Maybe Jid -- ^ Recipient (to)
       -> IQRequestType  -- ^ IQ type (@Get@ or @Set@)
       -> Maybe LangTag  -- ^ Language tag of the payload (@Nothing@ for
                         -- default)
       -> Element -- ^ The IQ body (there has to be exactly one)
       -> [ExtendedAttribute] -- ^ Additional stanza attributes
       -> Session
       -> IO (Either XmppFailure (STM (Maybe (Annotated IQResponse))))
sendIQ timeOut t tp lang body attrs session = do
    newId <- idGenerator session
    -- Right = explicit recipient; Left = our own jid (server-addressed IQ).
    j <- case t of
        Just t -> return $ Right t
        Nothing -> Left <$> getJid session
    -- Register a response slot under the fresh stanza id before sending,
    -- so the answer cannot race past us.
    ref <- atomically $ do
        resRef <- newEmptyTMVar
        let value = (j, resRef)
        (byNS, byId) <- readTVar (iqHandlers session)
        writeTVar (iqHandlers session) (byNS, Map.insert newId value byId)
        return resRef
    res <- sendStanza (IQRequestS $ IQRequest newId Nothing t lang tp body attrs)
             session
    case res of
        Right () -> do
            case timeOut of
                Nothing -> return ()
                Just t -> void . forkIO $ do
                      delay t
                      doTimeOut (iqHandlers session) newId ref
            return . Right $ readTMVar ref
        Left e -> return $ Left e
  where
    -- On timeout: fill the slot with Nothing (only if no answer arrived
    -- first -- tryPutTMVar) and drop the handler registration.
    doTimeOut handlers iqid var = atomically $ do
      p <- tryPutTMVar var Nothing
      when p $ do
          (byNS, byId) <- readTVar (iqHandlers session)
          writeTVar handlers (byNS, Map.delete iqid byId)
      return ()
-- | Like 'sendIQ', but waits for the answer IQ.
-- A failed send becomes 'IQSendError'; a timeout becomes 'IQTimeOut'.
sendIQA' :: Maybe Integer
         -> Maybe Jid
         -> IQRequestType
         -> Maybe LangTag
         -> Element
         -> [ExtendedAttribute]
         -> Session
         -> IO (Either IQSendError (Annotated IQResponse))
sendIQA' timeout to tp lang body attrs session = do
    ref <- sendIQ timeout to tp lang body attrs session
    either (return . Left . IQSendError) (fmap (maybe (Left IQTimeOut) Right)
                                           . atomically) ref
-- | Like 'sendIQ', but waits for the answer IQ. Discards plugin Annotations
sendIQ' :: Maybe Integer
        -> Maybe Jid
        -> IQRequestType
        -> Maybe LangTag
        -> Element
        -> [ExtendedAttribute]
        -> Session
        -> IO (Either IQSendError IQResponse)
sendIQ' timeout to tp lang body attrs session = do
    answer <- sendIQA' timeout to tp lang body attrs session
    -- Keep only the response, dropping the annotation list.
    return (fmap fst answer)
-- | Register your interest in inbound IQ stanzas of a specific type and
-- namespace. The returned STM action yields the received, matching IQ stanzas.
--
-- If a handler for IQ stanzas with the given type and namespace is already
-- registered, the producer will be wrapped in Left. In this case the returned
-- request tickets may already be processed elsewhere.
listenIQ :: IQRequestType  -- ^ Type of IQs to receive ('Get' or 'Set')
         -> Text -- ^ Namespace of the child element
         -> Session
         -> IO (Either (STM IQRequestTicket) (STM IQRequestTicket))
listenIQ tp ns session = do
    let handlers = (iqHandlers session)
    atomically $ do
        (byNS, byID) <- readTVar handlers
        iqCh <- newTChan
        -- insertLookupWithKey' keeps an existing channel (old wins) and
        -- tells us whether one was already registered.
        let (present, byNS') = Map.insertLookupWithKey'
                (\_ _ old -> old)
                (tp, ns)
                iqCh
                byNS
        writeTVar handlers (byNS', byID)
        case present of
            Nothing -> return . Right $ readTChan iqCh
            Just iqCh' -> do
                -- Someone already listens: hand out a clone of their channel.
                clonedChan <- cloneTChan iqCh'
                return . Left $ readTChan clonedChan
-- | Unregister a previously registered IQ handler. No more IQ stanzas will be
-- delivered to any of the returned producers.
unlistenIQ :: IQRequestType -- ^ Type of IQ ('Get' or 'Set')
           -> Text -- ^ Namespace of the child element
           -> Session
           -> IO ()
unlistenIQ tp ns session =
    atomically $ do
        (byNS, byId) <- readTVar handlersVar
        writeTVar handlersVar (Map.delete (tp, ns) byNS, byId)
  where
    handlersVar = iqHandlers session
-- | Answer an IQ request. Only the first answer is sent; subsequent
-- answers after the first successful one are dropped.
--
-- NOTE(review): the old wording ("Just True"/"Just False") predates the
-- @Maybe (Either XmppFailure ())@ return type. Presumably @Just (Right ())@
-- means the answer was sent, @Just (Left e)@ that sending failed (another
-- attempt can be undertaken), and @Nothing@ that the ticket was already
-- answered — confirm against 'answerTicket'.
answerIQ :: IQRequestTicket
         -> Either StanzaError (Maybe Element)
         -> [ExtendedAttribute]
         -> IO (Maybe (Either XmppFailure ()))
answerIQ = answerTicket
-- | Class of typed IQ request payloads: how to pickle the request, how to
-- unpickle the response, and which IQ type/namespace the request uses.
class IQRequestClass a where
    -- | Response payload type associated with this request type.
    data IQResponseType a
    -- | Pickler for the request payload element.
    pickleRequest :: PU Element a
    -- | Pickler for the response payload elements.
    pickleResponse :: PU [Element] (IQResponseType a)
    -- | IQ type ('Get' or 'Set') the request is sent with.
    requestType :: a -> IQRequestType
    -- | Namespace of the request's child element.
    requestNamespace :: a -> Text
-- | Ways in which a high-level typed IQ request can fail.
data IQRequestError = IQRequestSendError XmppFailure -- ^ the stanza could not be sent
                    | IQRequestTimeout -- ^ no answer arrived before the timeout
                    | IQRequestUnpickleError UnpickleError -- ^ answer payload did not parse
                      deriving Show
-- | Send a typed IQ request and unpickle the answer.
--
-- Fails in 'MonadError' with 'IQRequestTimeout' when no answer arrives in
-- time, 'IQRequestSendError' when the stanza could not be sent, and
-- 'IQRequestUnpickleError' when the answer payload does not parse.
-- A well-formed error answer is returned as @Left@.
sendIQRequest :: (IQRequestClass a, MonadError IQRequestError m, MonadIO m) =>
                 Maybe Integer
              -> Maybe Jid
              -> a
              -> Session
              -> m (Either IQError (IQResponseType a))
sendIQRequest timeout t req con = do
    answer <- liftIO $ sendIQ' timeout t (requestType req) Nothing
                               (pickle pickleRequest req) [] con
    case answer of
        Left IQTimeOut -> throwError IQRequestTimeout
        Left (IQSendError e) -> throwError (IQRequestSendError e)
        Right (IQResponseError e) -> return (Left e)
        Right (IQResponseResult res) ->
            either (throwError . IQRequestUnpickleError)
                   (return . Right)
                   (unpickle pickleResponse (res & toListOf payloadT))
-- | A handler turning a decoded request into a stanza error or a typed response.
type IQRequestHandler a = a -> IO (Either StanzaError (IQResponseType a))
-- | Listen for IQ requests of the handler's request type/namespace and
-- answer each one, forever. Payloads that fail to unpickle are answered
-- with a @bad-request@ stanza error. When the namespace is already
-- handled, logs a warning and returns immediately.
runIQHandler :: IQRequestClass a =>
                IQRequestHandler a
             -> Session
             -> IO ()
runIQHandler (handler :: a -> IO (Either StanzaError (IQResponseType a)))
             sess = do
    -- Proxy value used only to query the class methods; never forced.
    let prx = undefined :: a
        ns = requestNamespace prx
    registration <- listenIQ (requestType prx) ns sess
    case registration of
        Left _ ->
            warningM "Pontarius.Xmpp" $
                "IQ namespace " ++ show ns ++ " is already handled"
        Right nextTicket -> forever $ do
            ticket <- atomically nextTicket
            case unpickle pickleRequest (iqRequestBody ticket ^. payload) of
                Left _ -> answerIQ ticket (Left $ mkStanzaError BadRequest) []
                Right req -> do
                    outcome <- handler req
                    case outcome of
                        Left err -> answerIQ ticket (Left err) []
                        Right ok ->
                            answerIQ ticket
                                     (Right . listToMaybe $ pickle pickleResponse ok)
                                     []
| Philonous/pontarius-xmpp | source/Network/Xmpp/Concurrent/IQ.hs | bsd-3-clause | 8,502 | 0 | 27 | 2,765 | 1,970 | 991 | 979 | 169 | 5 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE PackageImports, NoImplicitPrelude #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE NoMonomorphismRestriction #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE GADTs #-}
module Main where
import Control.Applicative
-- import "monad-param" Control.Monad.Parameterized
import Control.Monad
import Data.List
import Data.Maybe
import Data.Monoid
import System.Cmd
import System.Environment
import System.Exit
import System.IO
import System.Posix.Process
import System.Posix.Signals
import System.Console.GetOpt
import Text.Printf
import Text.Regex.Posix
import Text.XHtml as Html
import Prelude -- hiding (Monad(..))
import qualified Prelude as P
import ArgumentFiltering (bestHeu, typeHeu, typeHeu2, innermost)
import PrologProblem
import TRS.FetchRules
import TRS.FetchRules.TRS
import Types hiding ((!))
import DPairs
import Utils
import qualified Solver
import Solver
import Proof hiding (problem)
import GraphViz
import NarrowingProblem
import Control.Monad.Free
import Aprove
returnM = return
main :: IO ()
main = do
#ifndef GHCI
installHandler sigALRM (Catch (putStrLn "timeout" >> exitImmediately (ExitFailure (-1)))) Nothing
#endif
(Options problemFile (NiceSolver solver pprSol) diagrams, _, errors) <- getOptions
sol <- runProofT solver
putStrLn$ if isSuccess sol then "YES" else "NO"
when diagrams $ withTempFile "." "narradar.dot" $ \fp h -> do
hPutStrLn h (pprSol sol)
hFlush h
system (printf "dot -Tpdf %s -o %s.pdf " fp problemFile)
-- hPutStrLn stderr (printf "Log written to %s.pdf" file)
return ()
-- ------------------------------
-- Command Line Options handling
-- ------------------------------
usage = "Narradar - Automated Narrowing Termination Proofs"

-- | Parse the command line. The first non-option argument is the problem
-- file (stdin, labelled "INPUT", when absent); a ".pl" suffix selects the
-- Prolog solver unless -s overrides it.
-- NOTE(review): 'problemFile' is used in 'solverFlag'' before it is bound
-- textually — legal in a recursive let, works because the test only forces
-- the file name.
getOptions = do
  args <- getArgs
  let (actions, nonOptions, errors) = getOpt Permute opts args
  flags0@Flags{..} <- foldl (>>=) (return defFlags) actions
  let solverFlag' = if (".pl" `isSuffixOf` problemFile) && isNothing solverFlag then "PL" else fromMaybe "N" solverFlag
      problemFile = fromMaybe "INPUT" (listToMaybe nonOptions)
      someSolver = parseSolver solverFlag'
  input <- maybe getContents readFile (listToMaybe nonOptions)
  return (Options problemFile (someSolver problemFile input) diagramsFlag, nonOptions, errors)
-- data Options where Options :: (TRSC f, Ppr f) => FilePath -> PPT id f Html IO -> Bool -> Options
data Options = Options { problemFile :: FilePath
, solver :: NiceSolver
, diagrams :: Bool
}
data Flags id = Flags { solverFlag :: Maybe String
, diagramsFlag :: Bool
}
defFlags = Flags{ solverFlag = Nothing
, diagramsFlag = True
}
--opts :: [OptDescr (Flags f id -> Flags f id)]
opts = [ Option "s" ["solver"] (ReqArg (\arg opts -> returnM opts{solverFlag = Just arg}) "DESC") "DESC = N | BN | PL [LOCAL path|WEB|SRV timeout] (default: automatic)"
, Option "" ["nodiagrams"] (NoArg $ \opts -> returnM opts{diagramsFlag = False}) "Do not produce a pdf proof file"
, Option "t" ["timeout"] (ReqArg (\arg opts -> scheduleAlarm (read arg) >> return opts) "SECONDS") "Timeout in seconds (default:none)"
, Option "h?" ["help"] (NoArg (\ _ -> putStrLn(usageInfo usage opts) >> exitSuccess)) "Displays this help screen"
]
type LSolver id f = ProblemG id f -> PPT LId BBasicLId Html IO
data SomeSolver where LabelSolver :: ProblemType Id -> LSolver Id BBasicId -> SomeSolver
SimpleSolver :: ProblemType LId -> LSolver LId BBasicLId -> SomeSolver
data SolverType id f where Labeller :: SolverType Id BBasicId
Simple :: SolverType LId BBasicLId
data NiceSolver where NiceSolver :: (TRSC f, Ppr f) => PPT id f Html IO -> (ProblemProofG id Html f -> String) -> NiceSolver
getSolver :: SomeSolver -> SolverType id f -> Maybe (ProblemType id, LSolver id f)
getSolver (LabelSolver typ s) Labeller = Just (typ, s)
getSolver (SimpleSolver typ s) Simple = Just (typ, s)
getSolver _ _ = Nothing
parseTRS :: ProblemType Id -> FilePath -> String -> PPT Id BasicId Html IO
parseTRS typ file txt = wrap' $ do
rules :: [Rule Basic] <- eitherIO$ parseFile trsParser file txt
let trs = mkTRS rules :: NarradarTRS String Basic'
return (mkGoalProblem bestHeu AllTerms $ mkDPProblem Narrowing trs)
parseProlog :: String -> PPT String Basic' Html IO
parseProlog = wrap' . return . either error return . parsePrologProblem
parseSolver "N" file txt = NiceSolver (parseTRS Narrowing file txt >>= narradarSolver) pprDot
parseSolver "BN" file txt = NiceSolver (parseTRS BNarrowing file txt >>= narradarSolver) pprDot
parseSolver "PL" file txt = NiceSolver (parseProlog txt >>= prologSolver) pprDot
parseSolver "PL_one" file txt = NiceSolver (parseProlog txt >>= prologSolver_one) pprDot
parseSolver "PL_inn" file txt = NiceSolver (parseProlog txt >>= prologSolver' (\_ _ -> innermost) (aproveSrvP 30)) pprDot
--parseSolver "PL_typ2" file txt = NiceSolver (parseProlog txt >>= prologSolver' ((typeHeu2.) . const) (aproveSrvP 30)) pprDot
{-
--parseSolver "PL_rhs" = LabelSolver Prolog prologSolver_rhs
parseSolver "PL_noL" = LabelSolver Prolog prologSolver_noL
parseSolver "PL_inn" = LabelSolver Prolog $ prologSolver' (\_ _ -> innermost) (aproveSrvP 30)
parseSolver "PL_typ2"= LabelSolver Prolog $ prologSolver' ((typeHeu2.) . const) (aproveSrvP 30)
parseSolver "PL_typ" = LabelSolver Prolog $ prologSolver' ((typeHeu.) . const) (aproveSrvP 30)
parseSolver ('P':'L':' ': (parseAprove -> Just k)) = LabelSolver Prolog (prologSolver' ((typeHeu.) . const) k)
--parseSolver ('P':'L':'_':'r':'h':'s':' ': (parseAprove -> Just k)) = LabelSolver Prolog (prologSolver_rhs' k)
parseSolver ('P':'L':'_':'o':'n':'e':' ': (parseAprove -> Just k)) = LabelSolver Prolog (prologSolver_one' ((typeHeu.) . const) k)
parseSolver _ = error "Could not parse the description. Expected (LOCAL path|WEB|SRV timeout)"
-}
-- | Parse an AProVE backend description: @SRV \<timeout\>@, @WEB@, or
-- @LOCAL \<path\>@. Yields 'Nothing' for anything else.
parseAprove = go1 where
    go0 _ = Nothing -- NOTE(review): unused; candidate for removal
    go1 (prefixedBy "SRV" -> Just timeout) | [(t,[])] <- reads timeout = Just (aproveSrvP t)
    go1 (prefixedBy "WEB" -> Just []) = Just aproveWebP
    go1 (prefixedBy "LOCAL "-> Just path) = Just $ aproveLocalP path
    go1 _ = Nothing
-- | Strip the first list from the front of the second, yielding the
-- remainder, or 'Nothing' when it is not a prefix (same contract as
-- 'Data.List.stripPrefix'). The @_ []@ case makes the function total:
-- previously an input shorter than the prefix was a pattern-match crash.
prefixedBy :: Eq a => [a] -> [a] -> Maybe [a]
prefixedBy [] xx = Just xx
prefixedBy _ [] = Nothing
prefixedBy (p:pp) (x:xx) | p == x = prefixedBy pp xx
                         | otherwise = Nothing
-- | Lift an 'Either' into IO, aborting via 'error' (with the shown value) on 'Left'.
eitherIO = either (error.show) return
| pepeiborra/narradar | src/Main.hs | bsd-3-clause | 6,802 | 0 | 16 | 1,505 | 1,604 | 845 | 759 | 106 | 4 |
module System35.Test.Base (assertEqual', assertEqualSeq) where
import Test.HUnit
import Data.Word (Word8)
import qualified Data.ByteString as S
import Control.Monad (unless, foldM)
-- | Assert that two lists are equal; on mismatch, fail with a message
-- that reports the first differing position and a short window of both
-- lists around it (elided with ".." when the divergence is deep).
assertEqualSeq :: (Eq a, Show a) => String -> [a] -> [a] -> Assertion
assertEqualSeq preface expected actual =
  unless (actual == expected) (assertFailure msg)
    where msg = (if null preface then "" else preface ++ "\n") ++
                "position: " ++ show pos ++ "\n" ++
                 case pos >= 10 of
                   True -> "expected: .." ++ lastMatch pos expected ++
                           "\n but got: .." ++ lastMatch pos actual
                   False -> case len <= 10 of
                     True -> "expected: " ++ show (take (pos+1) expected) ++
                             "\n but got: " ++ show (take (pos+1) actual)
                     False -> "expected: [" ++ show' (take (pos+1) expected) ++
                              "\n but got: [" ++ show' (take (pos+1) actual)
          -- Index of the first mismatch (0 when only lengths differ at the end).
          pos = findErrorPos expected actual 0
          -- Render the ~8 elements leading up to the mismatch.
          lastMatch pos seq = show' $ take (pos-1) (drop (pos-9) seq)
          len = max (length expected) (length actual)
          show' [] = "..."
          show' (x:xs) = show x ++ "," ++ show' xs
-- | 'assertEqualSeq' specialised to strict 'S.ByteString's: compares the
-- raw bytes and reports the first differing offset with context.
assertEqual' :: String -> S.ByteString -> S.ByteString -> Assertion
assertEqual' preface expected actual =
  unless (actual == expected) (assertFailure msg)
    where msg = (if null preface then "" else preface ++ "\n") ++
                "position: " ++ show pos ++ "\n" ++
                 case pos >= 10 of
                   True -> "expected: .." ++ lastMatch pos (S.unpack (expected)) ++
                           "\n but got: .." ++ lastMatch pos (S.unpack (actual))
                   False -> "expected: " ++ show (take (pos+1) (S.unpack (expected))) ++
                            "\n but got: " ++ show (take (pos+1) (S.unpack (actual)))
          -- Offset of the first differing byte.
          pos = findErrorPos (S.unpack expected) (S.unpack actual) 0
          -- Render the ~8 bytes leading up to the mismatch.
          lastMatch pos seq = show' $ take (pos-1) (drop (pos-9) seq)
          show' [] = ",.."
          show' (x:xs) = "," ++ show x ++ show' xs
-- | Position of the first mismatch between two lists, offset by the
-- accumulator @x@. When one list is a proper prefix of the other, the
-- divergence point is the end of the shorter list (previously a
-- pattern-match crash — reachable, since compared inputs may have
-- different lengths). Fully equal inputs yield 0 regardless of @x@,
-- preserving the original base case; callers only consult the result
-- when the inputs differ.
findErrorPos :: (Eq a) => [a] -> [a] -> Int -> Int
findErrorPos [] [] _ = 0
findErrorPos [] _ x = x
findErrorPos _ [] x = x
findErrorPos (e:es) (a:as) x
  | (e == a) = findErrorPos es as (x+1)
  | otherwise = x
-- | Number of positions at which the two lists differ, added to the
-- accumulator @x@. When the lists have different lengths, each surplus
-- element of the longer list counts as one mismatch (previously a
-- pattern-match crash on unequal lengths).
findErrorCnt :: (Eq a) => [a] -> [a] -> Int -> Int
findErrorCnt [] [] x = x
findErrorCnt [] as x = x + length as
findErrorCnt es [] x = x + length es
findErrorCnt (e:es) (a:as) x
  | (e == a) = findErrorCnt es as x
  | otherwise = findErrorCnt es as (x+1)
| smly/haskell-xsystem | System35/Test/Base.hs | bsd-3-clause | 2,619 | 0 | 21 | 967 | 999 | 515 | 484 | 47 | 5 |
module CutElimination where
import Search.Types
-- | Translate a proof with cut into a cut-free proof.
-- NOTE(review): every branch is a typed hole (@_@) — this function is an
-- unimplemented stub and the module will not compile as-is.
eliminateCut :: CutProof -> CutFreeProof
eliminateCut cp = case cp of
  FMap f cp -> _
  At f cp -> _
  Cut cp cp' -> _
  Axiom t -> _
| imeckler/mote | Search/CutElimination.hs | bsd-3-clause | 192 | 0 | 8 | 52 | 72 | 36 | 36 | 8 | 4 |
{-# LANGUAGE OverloadedStrings #-}
module LogReader.Reader (readExercise) where
import Data.Aeson
import Data.Maybe
import Data.List
import Data.Text (Text)
import Data.Text.Lazy.Encoding
import System.Directory
import System.FilePath
import LogReader.Types
import qualified Data.Text as T
import qualified Data.Text.IO as IO
import qualified Data.String.Conversions as C
-- | List a folder's entries in sorted order, excluding "." and "..",
-- each canonicalized to an absolute path.
folderContent :: Path -> IO ([Path])
folderContent dir = do
    entries <- getDirectoryContents dir
    let visible = filter (`notElem` [".", ".."]) (sort entries)
    mapM (canonicalizePath . (dir </>)) visible
-- | Parse one log file. The file name serves as the timestamp; the body
-- must be a JSON array of strings whose first element itself decodes to
-- the log payload. Yields 'Nothing' when either decoding step fails.
readLog :: Path -> IO (Maybe Log)
readLog path = do
    raw <- IO.readFile path
    let stamp = T.pack (takeFileName path)
    case decode (C.cs raw) :: Maybe [Text] of
        Just (entry : _) -> return (Log stamp <$> (decode . encodeUtf8 . C.cs) entry)
        _ -> return Nothing
-- | Load a user from a per-user folder: the folder name is the user name,
-- each contained file a log (unparseable logs are silently dropped).
loadUser :: Path -> IO User
loadUser userDir = do
    logFiles <- folderContent userDir
    parsedLogs <- mapM readLog logFiles
    return (User (T.pack (takeFileName userDir)) (catMaybes parsedLogs))
-- | Load a task from a task folder: the folder name is the task name,
-- each sub-folder a user.
loadTask :: Path -> IO Task
loadTask taskDir = do
    userDirs <- folderContent taskDir
    taskUsers <- mapM loadUser userDirs
    return (Task (T.pack (takeFileName taskDir)) taskUsers)
-- | Load an exercise from a folder tree: the folder name is the exercise
-- name, each sub-folder a task.
readExercise :: Path -> IO Exercise
readExercise exerciseDir = do
    taskDirs <- folderContent exerciseDir
    exerciseTasks <- mapM loadTask taskDirs
    return (Exercise (T.pack (takeFileName exerciseDir)) exerciseTasks)
| keveri/logreader | src/LogReader/Reader.hs | bsd-3-clause | 1,632 | 0 | 15 | 366 | 567 | 285 | 282 | 45 | 2 |
module Cipher where
import Data.Char
-- | Shift a lowercase letter @n@ positions through the alphabet, wrapping
-- around ('z' -> 'a'). Fixed to reduce modulo 26 on the letter's offset
-- from 'a': the old @mod (ord x + n) (ord 'a' + 26)@ silently produced
-- non-letters for negative shifts or shifts past the code-point range
-- (e.g. @shift 52 'a'@ gave '{'). In-range behaviour is unchanged.
shift :: Int -> Char -> Char
shift n x = chr $ mod (ord x - orda + n) 26 + orda
  where orda = ord 'a'

-- Sanity example: shifting 'z' by one wraps to 'a'.
blah = shift 1 'z'

-- | Caesar-cipher a lowercase string by shifting every character.
caesar :: Int -> String -> String
caesar n = fmap $ shift n
| taojang/haskell-programming-book-exercise | src/ch09/Cipher.hs | bsd-3-clause | 229 | 0 | 12 | 58 | 116 | 61 | 55 | 8 | 1 |
{-# LANGUAGE ScopedTypeVariables #-}
import Test.Hspec
import Test.Cabal.Path
import Text.Regex.Posix
import Control.Exception
-- | Run an IO action producing a String and assert that its output
-- matches the given POSIX regular expression.
shouldReturnRegex :: IO String -> String -> IO ()
shouldReturnRegex action regex = do
    output <- action
    shouldBe (output =~ regex) True
main :: IO ()
main = hspec $ do
describe "library-test" $ do
it "getExePath" $ do
(getExePath "." "cabal-test-bin" `shouldReturn` "./dist/build/cabal-test-bin/cabal-test-bin")
`catch`
(\(_::SomeException) -> (getExePath "." "cabal-test-bin" `shouldReturnRegex` "./dist/dist-sandbox-(.*)/cabal-test-bin/cabal-test-bin"))
it "getExeDir" $ do
(getExeDir "." "cabal-test-bin" `shouldReturn` "./dist/build/cabal-test-bin")
`catch`
(\(_::SomeException) -> (getExeDir "." "cabal-test-bin" `shouldReturnRegex` "./dist/dist-sandbox-(.*)/cabal-test-bin"))
| junjihashimoto/cabal-test-bin | tests/test.hs | bsd-3-clause | 850 | 0 | 17 | 133 | 236 | 125 | 111 | 20 | 1 |
{-# LANGUAGE MultiParamTypeClasses, FlexibleInstances, ScopedTypeVariables, InstanceSigs, AllowAmbiguousTypes, FlexibleContexts #-}
module Platform.CleanTest where
import Spec.Machine
import Spec.Decode
import Utility.Utility
import Spec.CSRFileIO
import qualified Spec.CSRField as Field
import Data.Bits
import Data.Int
import Data.Word
import Data.Char
import Data.Maybe
import Data.IORef
import Data.Array.IO
import System.Posix.Types
import System.IO.Error
import qualified Data.Map.Strict as S
import Control.Monad.State
import Control.Monad.Trans.Maybe
import Debug.Trace as T
import Platform.Pty
import Platform.Plic
import Platform.Clint
import Control.Concurrent.MVar
data VerifMinimal64 = VerifMinimal64 { registers :: IOUArray Register Int64 ,
fpregisters :: IOUArray Register Int32,
csrs :: CSRFile,
privMode :: IORef PrivMode,
pc :: IORef Int64,
nextPC :: IORef Int64,
mem :: IOUArray Int Word8,
plic :: Plic,
clint :: (IORef Int64, MVar Int64),
console :: (MVar [Word8], Fd),
reservation :: IORef (Maybe Int),
-- Verification Packet
exception :: IORef Bool,
interrupt :: IORef Bool,
valid_dst :: IORef Bool,
valid_addr :: IORef Bool,
instruction :: IORef Int32,
cause :: IORef Int32,
d :: IORef Word64,
dst :: IORef Int64,
addrPacket :: IORef Word64,
pcPacket :: IORef Int64,
valid_timer :: IORef Bool,
timer :: IORef Int64,
mipPacket:: IORef Int64
}
type IOState = StateT VerifMinimal64 IO
type LoadFunc = IOState Int32
type StoreFunc = Int32 -> IOState ()
-- | MMIO load handler for the console: read one byte from the pty,
-- yielding -1 when no input is available.
rvGetChar :: IOState Int32
rvGetChar = do
  machine <- get
  byte <- liftIO $ readPty (fst (console machine))
  lift $ putStrLn "Get Char happened"
  return $ maybe (-1) fromIntegral byte
-- | MMIO store handler for the console: write one byte to the pty.
rvPutChar :: Int32 -> IOState ()
rvPutChar ch = do
  machine <- get
  let (_, outFd) = console machine
  liftIO $ writePty outFd (fromIntegral ch)
-- MMIO stubs: 'rvZero' is a load handler that always yields 0;
-- 'rvNull' is a store handler that discards its value.
rvZero = return 0
rvNull val = return ()
-- NOTE(review): any load routed through 'getMTime' crashes — mtime reads
-- are actually served by 'readClintWrap' in 'memMapTable'.
getMTime :: LoadFunc
getMTime = undefined
-- Ignore writes to mtime.
setMTime :: StoreFunc
setMTime _ = return ()
readPlicWrap addr = do
refs <- get
(val, interrupt) <- lift $ readPlic (plic refs) addr
when (interrupt == Set) (do
lift $ putStrLn "Set external interrupt from read"
setCSRField Field.MEIP 1)
when (interrupt == Reset) (do
lift $ putStrLn "Reset external initerrupt"
setCSRField Field.MEIP 0)
return val
writePlicWrap addr val = do
refs <- get
interrupt <- lift $ writePlic (plic refs) addr val
when (interrupt == Set) (do
lift $ putStrLn "Set external interrupt from write"
setCSRField Field.MEIP 1)
when (interrupt == Reset) (do
lift $ putStrLn "Reset external interrupt from write"
setCSRField Field.MEIP 0)
return ()
readClintWrap addr = do
refs <- get
let (mtimecmp,rtc) = clint refs
mint <- lift $ readClint mtimecmp rtc addr
lift $ writeIORef (valid_timer refs) True
when (addr == 0xbff8) . lift . writeIORef (timer refs) . fromIntegral . fromJust $ mint
-- lift . putStrLn $ "readClint " ++ show mint ++ " at addr " ++ show addr
case mint of
Just a -> return a
Nothing -> return 0 --Impossible
writeClintWrap addr val = do
refs <- get
let (mtimecmp,rtc) = clint refs
lift . putStrLn $ "writeClint " ++ show val ++ " at addr " ++ show addr
lift $ writeClint mtimecmp addr val
setCSRField Field.MTIP 0
-- Addresses for mtime/mtimecmp chosen for Spike compatibility, and getchar putchar.
memMapTable :: S.Map MachineInt (LoadFunc, StoreFunc)
memMapTable = S.fromList
[
-- Pty
(0xfff0, (rvZero, rvPutChar)),
(0xfff4, (rvGetChar, rvNull)),
-- Plic
(0x4000000, (readPlicWrap 0x200000, writePlicWrap 0x200000)),
(0x4000004, (readPlicWrap 0x200004, writePlicWrap 0x200004)),
-- Clint
(0x2000000, (fmap fromIntegral $ getCSRField Field.MSIP, setCSRField Field.SSIP)),
(0x200bff8, (readClintWrap 0xbff8, writeClintWrap 0xbff8)),
(0x200bffc, (readClintWrap 0xbffc, writeClintWrap 0xbffc)),
(0x2004000, (readClintWrap 0x4000, writeClintWrap 0x4000)),
(0x2004004, (readClintWrap 0x4004, writeClintWrap 0x4004))
]
mtimecmp_addr = 0x4000 :: Int64
instance RiscvMachine IOState Int64 where
getRegister reg = do
if reg == 0
then return 0
else do
refs <- get
lift $! readArray (registers refs) reg
setRegister reg val = do
refs <- get
if reg == 0
then do
-- lift $ writeIORef (valid_dst refs) True
-- lift $ writeIORef (dst refs) reg
-- lift $ writeIORef (d refs) $ fromIntegral val
return ()
else do
lift $ writeIORef (valid_dst refs) True
lift $ writeIORef (dst refs) reg
lift $ writeIORef (d refs) $ fromIntegral val
lift $! writeArray (registers refs) reg val
getFPRegister reg = do
refs <- get
lift $! readArray (fpregisters refs) reg
setFPRegister reg val = do
refs <- get
lift $! writeArray (fpregisters refs) reg val
getPC = do
refs <- get
lift $! readIORef (pc refs)
setPC npc = do
refs <- get
lift $! writeIORef (nextPC refs) npc
getPrivMode = do
refs <- get
lift $! readIORef (privMode refs)
setPrivMode val = do
refs <- get
lift $! writeIORef (privMode refs) val
commit = do
refs <- get
npc <- lift $ readIORef (nextPC refs)
lift $! writeIORef (pc refs) npc
-- -- Wrap Memory instance:
loadByte s addr =
case S.lookup (fromIntegral addr) memMapTable of
Just _ -> error "loadByte on MMIO unsupported"
Nothing -> do
refs <- get
fmap fromIntegral . lift $ readArray (mem refs) (fromIntegral addr)
loadHalf s addr =
case S.lookup (fromIntegral addr) memMapTable of
Just _ -> error "loadHalf on MMIO unsupported"
Nothing -> do
refs <- get
b0 <- lift . readArray (mem refs) $ fromIntegral addr
b1 <- lift . readArray (mem refs) $ fromIntegral (addr + 1)
return (combineBytes [b0,b1])
loadWord :: forall s. (Integral s) => SourceType -> s -> IOState Int32
loadWord s ad = do
val <- (case S.lookup ((fromIntegral:: s -> MachineInt) ad) memMapTable of
Just (getFunc, _) -> getFunc
Nothing -> do
refs <- get
b0 <- lift . readArray (mem refs) $! fromIntegral ad
b1 <- lift . readArray (mem refs) $! fromIntegral (ad + 1)
b2 <- lift . readArray (mem refs) $! fromIntegral (ad + 2)
b3 <- lift . readArray (mem refs) $! fromIntegral (ad + 3)
return (combineBytes [b0,b1,b2,b3]))
return val
loadDouble s addr = do
res_bot <- loadWord s addr
res_top <- loadWord s (addr+4)
let bytes_bot = splitWord res_bot
let bytes_top = splitWord res_top
return (combineBytes $ bytes_bot ++ bytes_top)
storeByte s addr val =
case S.lookup (fromIntegral addr) memMapTable of
Just _ -> error "storeByte on MMIO unsupported"
Nothing -> do
refs <- get
lift $ writeArray (mem refs) (fromIntegral addr) (fromIntegral val) -- Convert from Int8 to Word8
storeHalf s addr val =
case S.lookup (fromIntegral addr) memMapTable of
Just _ -> error "storeHald on MMIO unsupported"
Nothing -> do
let bytes = splitHalf val
refs <- get
forM_ (zip bytes [addr + i| i<- [0..]]) $ (\(x,addr)-> lift $ writeArray (mem refs) (fromIntegral addr) (fromIntegral x))
storeWord :: forall s. (Integral s, Bits s) => SourceType -> s -> Int32 -> IOState ()
storeWord s addr val = do
refs <- get
lift $ writeIORef (valid_addr refs) True
lift $ writeIORef (addrPacket refs) $ fromIntegral addr
lift $ writeIORef (d refs) . fromIntegral $ (fromIntegral val :: Word32)
-- when (addr >= 0x2000000 && addr < 0x20c0000) .lift $ putStrLn ("write to the clint: " ++ show ( fromIntegral addr))
case S.lookup ((fromIntegral:: s -> MachineInt) addr) memMapTable of
Just (_, setFunc) -> setFunc val
Nothing -> do
let bytes = splitWord val
-- refs <- get
forM_ (zip bytes [addr + i| i<- [0..]]) $ (\(x,addr)-> lift $ writeArray (mem refs) (fromIntegral addr) (fromIntegral x))
storeDouble s addr val =
case (S.lookup (fromIntegral addr) memMapTable,S.lookup (fromIntegral (addr+4)) memMapTable) of
(Just (_, setFunc1 ),Just (_, setFunc2 )) -> do
setFunc1 $ fromIntegral (val .&. 0xFFFFFFFF)
setFunc2 $ fromIntegral (shiftR val 32)
(Nothing, Nothing) -> do
let bytes = splitDouble val
refs <- get
forM_ (zip bytes [addr + i| i<- [0..]]) $ (\(x,addr)-> lift $ writeArray (mem refs) (fromIntegral addr) (fromIntegral x))
_ -> error "storeDouble half within MMIO, half without that's SOOOO wrong"
makeReservation addr = do
refs <- get
lift $ writeIORef (reservation refs) (Just $ fromIntegral addr)
checkReservation addr = do
refs <- get
res <- lift $ readIORef (reservation refs)
return (Just (fromIntegral addr) == res)
clearReservation addr = do
refs <- get
lift $ writeIORef (reservation refs) Nothing
fence a b = return ()
-- -- CSRs:
getCSRField field = do
refs <- get
lift $! readArray (csrs refs) field
unsafeSetCSRField :: forall s. (Integral s) => Field.CSRField -> s -> IOState ()
unsafeSetCSRField field val = do -- CSRS are not refs in there, because I am lazy.
refs <- get
lift $! writeArray (csrs refs) field ((fromIntegral:: s -> MachineInt) val)
flushTLB = return ()
getPlatform = return (Platform { dirtyHardware = return False, writePlatformCSRField = \field value -> return value })
| mit-plv/riscv-semantics | src/Platform/CleanTest.hs | bsd-3-clause | 10,866 | 0 | 20 | 3,573 | 3,384 | 1,686 | 1,698 | 242 | 2 |
module Main where
import Control.Lens
import Control.Monad.Catch
import Control.Monad.Except
import Data.Proxy
import Data.Text
import Data.Text.Encoding
import Foreign.C
import System.IO
import System.IO.Temp
import Game.GoreAndAsh.Core
import Game.GoreAndAsh.Logging
import Game.GoreAndAsh.Resource
import Game.GoreAndAsh.Time
import Game.GoreAndAsh.SDL
import qualified SDL
import qualified SDL.Image as SDLI
import SDL (get)
import qualified Data.ByteString.Lazy as BS
type AppStack t = ResourceT t (SDLT t (LoggingT t (TimerT t (GameMonad t))))
newtype AppMonad t a = AppMonad { runAppMonad :: AppStack t a}
deriving (Functor, Applicative, Monad, MonadFix)
instance Resource Texture where
type ResourceArg Texture = Renderer
readResource r name bs = withSystemTempFile name $ \path h -> do
BS.hPut h bs
hFlush h
img <- SDLI.loadTexture r path
return $ Right img -- TODO: handle exceptions and put them in Left
app :: forall t m . (TimerMonad t m, MonadResource t m, LoggingMonad t m, MonadSDL t m) => m ()
app = do
tickE <- tickEveryN (realToFrac (1 :: Double)) 1 never
logInfoE $ ffor tickE $ const "TIIIICK!"
rec
let
errE :: Event t Text
errE = fforMaybe loadedE $ \e -> case e of
Left s -> Just s
_ -> Nothing
succE :: Event t Texture
succE = fforMaybe loadedE $ \e -> case e of
Right s -> Just s
_ -> Nothing
logWarnE $ ffor errE showl
texDyn <- holdDyn Nothing $ fmap Just succE
win <- createMainWindow (const () <$> succE) (drawFrame texDyn) defaultWindowCfg
loadedE <- loadResource $ ffor tickE $ const (win ^. windowRenderer, "kassa.png")
return ()
main :: IO ()
main = runSpiderHost $ hostApp $ runModule opts (app :: AppMonad Spider ())
-- | Resource-module configuration: load assets relative to ./media,
-- with no options for the next module in the stack.
opts :: ResourceOptions ()
opts = ResourceOptions {
    resourceOptsPrefix = "./media"
  , resourceOptsNext = ()
  }
-- Boilerplate below
deriving instance (ReflexHost t, MonadCatch (HostFrame t)) => MonadCatch (AppMonad t)
deriving instance (ReflexHost t, MonadThrow (HostFrame t)) => MonadThrow (AppMonad t)
deriving instance (ReflexHost t, MonadMask (HostFrame t)) => MonadMask (AppMonad t)
deriving instance (ReflexHost t, MonadIO (HostFrame t)) => MonadIO (AppMonad t)
deriving instance (ReflexHost t, MonadIO (HostFrame t)) => MonadResource t (AppMonad t)
deriving instance (ReflexHost t, MonadIO (HostFrame t)) => TimerMonad t (AppMonad t)
deriving instance (ReflexHost t, MonadIO (HostFrame t)) => LoggingMonad t (AppMonad t)
deriving instance (ReflexHost t) => MonadSample t (AppMonad t)
deriving instance (ReflexHost t) => MonadHold t (AppMonad t)
deriving instance (ReflexHost t) => MonadSubscribeEvent t (AppMonad t)
deriving instance (ReflexHost t, MonadIO (HostFrame t), MonadCatch (HostFrame t)) => MonadSDL t (AppMonad t)
instance ReflexHost t => MonadReflexCreateTrigger t (AppMonad t) where
newEventWithTrigger = AppMonad . newEventWithTrigger
newFanEventWithTrigger trigger = AppMonad $ newFanEventWithTrigger trigger
instance (ReflexHost t, MonadIO (HostFrame t)) => MonadAppHost t (AppMonad t) where
getFireAsync = AppMonad getFireAsync
getRunAppHost = do
runner <- AppMonad getRunAppHost
return $ \m -> runner $ runAppMonad m
performPostBuild_ = AppMonad . performPostBuild_
liftHostFrame = AppMonad . liftHostFrame
instance (ReflexHost t, MonadIO (HostFrame t)) => GameModule t (AppMonad t) where
type ModuleOptions t (AppMonad t) = ModuleOptions t (AppStack t)
runModule os m = runModule os $ runAppMonad m
withModule t _ = withModule t (Proxy :: Proxy (AppStack t))
drawFrame :: forall t . (ReflexHost t, MonadIO (HostFrame t))
=> Dynamic t (Maybe Texture) -> Window -> Renderer -> HostFrame t ()
drawFrame texDyn win r = do
rendererDrawColor r $= V4 0 0 0 0
clear r
mtex <- sample . current $ texDyn
case mtex of
Nothing -> return ()
Just tex -> SDL.copy r tex Nothing Nothing
glSwapWindow win
where
getCurrentSize :: HostFrame t (V2 CInt)
getCurrentSize = do
vp <- get (rendererViewport r)
case vp of
Nothing -> return 0
Just (Rectangle _ s) -> return s
resizeRect :: V2 CInt -> Rectangle Double -> Rectangle CInt
resizeRect (V2 vw vh) (Rectangle (P (V2 x y)) (V2 w h)) = Rectangle (P (V2 x' y')) (V2 w' h')
where
x' = round $ x * fromIntegral vw
y' = round $ y * fromIntegral vh
w' = round $ w * fromIntegral vw
h' = round $ h * fromIntegral vh
| Teaspot-Studio/gore-and-ash-resource | examples/Example015.hs | bsd-3-clause | 4,511 | 0 | 17 | 959 | 1,685 | 855 | 830 | -1 | -1 |
module Game.Pong.Graphics where
import Data.Functor
import Game.Pong.Core
import Game.Util (sfVec2f,forceCleanup)
import Game.Vector
import Control.Monad.SFML (SFML)
import qualified SFML.Graphics as G
import qualified SFML.Window as W
import qualified Control.Monad.SFML.Window as WM
import qualified Control.Monad.SFML.Graphics as GM
import SFML.Graphics.Color (black,white)
data PongContext = PongContext {
pcWin :: G.RenderWindow,
pcFont :: G.Font
}
dashedLine :: (Vec2f,Vec2f) -> Float -> [W.Vec2f]
dashedLine (p1,p2) dashLen = verts
where
verts = take (nSegs*2) $ sfVec2f
<$> (makeDash
=<< iterate (+. step) init)
makeDash v = [v,v+.dash]
step = stepLen *. vnorm diff
dash = dashLen *. vnorm diff
init = p1 +. ((1/6+initFact/2)*stepLen) *. vnorm diff
(nSegs,initFact) = properFraction $ vmag diff / stepLen
stepLen = dashLen*1.5
diff = p2 -. p1
-- | Replace a vertex array's contents: clear it, then append each vertex
-- in order.
setVAContents :: G.VertexArray -> [G.Vertex] -> SFML ()
setVAContents va verts = do
    GM.clearVA va
    mapM_ (GM.appendVA va) verts
drawScores :: (Int,Int) -> PongContext -> SFML ()
drawScores (l,r) (PongContext wnd fnt) = do
(W.Vec2f w h) <- GM.getViewSize =<< GM.getView wnd
let mid = w/2
let (lx,rx) = (0.8*mid,1.2*mid)
let y = negate $ 0.01*h -- .001*h
txt <- GM.createText
GM.setTextFont txt fnt
GM.setTextCharacterSize txt $ fromIntegral $ floor (0.1*h)
let (lText,rText) = (show l,show r)
GM.setTextString txt lText
(G.FloatRect _ _ tw th) <- GM.getTextLocalBounds txt
GM.setOrigin txt (W.Vec2f tw 0)
GM.setPosition txt $ W.Vec2f lx y
GM.drawText wnd txt Nothing
GM.setTextString txt rText
(G.FloatRect _ _ tw th) <- GM.getTextLocalBounds txt
GM.setOrigin txt (W.Vec2f tw 0)
GM.setPosition txt $ W.Vec2f rx y
GM.drawText wnd txt Nothing
drawMidLine :: G.RenderWindow -> SFML ()
drawMidLine wnd = do
(W.Vec2f w h) <- GM.getViewSize =<< GM.getView wnd
let mid = w/2
va <- GM.createVA
GM.setPrimitiveType va G.Lines
setVAContents va
$ (\p -> G.Vertex p white (W.Vec2f 0 0))
<$> dashedLine ((mid,0),(mid,h)) 80
GM.drawVertexArray wnd va Nothing
drawPong :: Pong -> PongContext -> SFML ()
drawPong p ctx@(PongContext wnd fnt) = forceCleanup $ do
let (w,h) = pScreenSize p
view <- GM.viewFromRect (G.FloatRect 0 0 w h)
GM.setView wnd view
GM.clearRenderWindow wnd black
drawScores (pLeftScore p,pRightScore p) ctx
drawMidLine wnd
rectShape <- GM.createRectangleShape
GM.setSize rectShape (sfVec2f $ pPaddleSize p)
let halfsize = 0.5 *. pPaddleSize p
GM.setPosition rectShape $ sfVec2f
$ paddleLoc (pLeftPaddle p) -. halfsize
GM.drawRectangle wnd rectShape Nothing
GM.setPosition rectShape $ sfVec2f
$ paddleLoc (pRightPaddle p) -. halfsize
GM.drawRectangle wnd rectShape Nothing
GM.setSize rectShape (sfVec2f $ pBallSize p)
let halfsize = 0.5 *. pBallSize p
GM.setPosition rectShape $ sfVec2f
$ ballLoc (pBall p) -. halfsize
GM.drawRectangle wnd rectShape Nothing
| Ginto8/Pong-Haskell | src/Game/Pong/Graphics.hs | bsd-3-clause | 3,248 | 0 | 14 | 843 | 1,267 | 630 | 637 | 82 | 1 |
module Utils.Array (sampleAtInterval) where
import Data.Array
-- | Keep every @interval@-th element of the array, starting at its lower
-- bound. The result is re-indexed from the same lower bound. Requires
-- @interval >= 1@.
--
-- Fixed: the upper bound was computed as @sourceMax `div` interval@,
-- which only matches the number of sampled elements when the lower bound
-- is 0 — for any other lower bound 'listArray' raised a runtime error.
-- The bound is now derived from the element count, so behaviour for
-- 0-based arrays is unchanged and other bounds work too.
sampleAtInterval :: Int -> Array Int a -> Array Int a
sampleAtInterval interval source = listArray (lo, lo + count - 1) picked
  where
    (lo, hi) = bounds source
    -- Indices lo, lo+interval, ... up to hi; count matches that list.
    count = (hi - lo) `div` interval + 1
    picked = [source ! i | i <- [lo, lo + interval .. hi]]
| thlorenz/Pricetory | src/Utils/Array.hs | bsd-3-clause | 445 | 0 | 12 | 133 | 169 | 87 | 82 | 10 | 1 |
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE FlexibleContexts #-}
-----------------------------------------------------------------------------
-- |
-- Module : Distribution.Client.InstallPlan
-- Copyright : (c) Duncan Coutts 2008
-- License : BSD-like
--
-- Maintainer : duncan@community.haskell.org
-- Stability : provisional
-- Portability : portable
--
-- Package installation plan
--
-----------------------------------------------------------------------------
module Distribution.Client.InstallPlan (
InstallPlan,
GenericInstallPlan,
PlanPackage,
GenericPlanPackage(..),
-- * Operations on 'InstallPlan's
new,
toList,
fromSolverInstallPlan,
configureInstallPlan,
ready,
processing,
completed,
failed,
remove,
preexisting,
preinstalled,
showPlanIndex,
showInstallPlan,
-- * Graph-like operations
reverseTopologicalOrder,
) where
import Distribution.Client.Types
import qualified Distribution.PackageDescription as PD
import qualified Distribution.Simple.Configure as Configure
import qualified Distribution.Simple.Setup as Cabal
import Distribution.InstalledPackageInfo
( InstalledPackageInfo )
import Distribution.Package
( PackageIdentifier(..), Package(..)
, HasUnitId(..), UnitId(..) )
import Distribution.Solver.Types.SolverPackage
import Distribution.Text
( display )
import qualified Distribution.Client.SolverInstallPlan as SolverInstallPlan
import Distribution.Client.SolverInstallPlan (SolverInstallPlan)
import Distribution.Solver.Types.ComponentDeps (ComponentDeps)
import qualified Distribution.Solver.Types.ComponentDeps as CD
import Distribution.Solver.Types.PackageFixedDeps
import Distribution.Solver.Types.Settings
import Distribution.Solver.Types.SolverId
-- TODO: Need this when we compute final UnitIds
-- import qualified Distribution.Simple.Configure as Configure
import Data.List
( foldl', intercalate )
import Data.Maybe
( catMaybes )
import qualified Distribution.Compat.Graph as Graph
import Distribution.Compat.Graph (Graph, IsNode(..))
import Distribution.Compat.Binary (Binary(..))
import GHC.Generics
import Control.Exception
( assert )
import qualified Data.Map as Map
import qualified Data.Traversable as T
-- When cabal tries to install a number of packages, including all their
-- dependencies it has a non-trivial problem to solve.
--
-- The Problem:
--
-- In general we start with a set of installed packages and a set of source
-- packages.
--
-- Installed packages have fixed dependencies. They have already been built and
-- we know exactly what packages they were built against, including their exact
-- versions.
--
-- Source package have somewhat flexible dependencies. They are specified as
-- version ranges, though really they're predicates. To make matters worse they
-- have conditional flexible dependencies. Configuration flags can affect which
-- packages are required and can place additional constraints on their
-- versions.
--
-- These two sets of package can and usually do overlap. There can be installed
-- packages that are also available as source packages which means they could
-- be re-installed if required, though there will also be packages which are
-- not available as source and cannot be re-installed. Very often there will be
-- extra versions available than are installed. Sometimes we may like to prefer
-- installed packages over source ones or perhaps always prefer the latest
-- available version whether installed or not.
--
-- The goal is to calculate an installation plan that is closed, acyclic and
-- consistent and where every configured package is valid.
--
-- An installation plan is a set of packages that are going to be used
-- together. It will consist of a mixture of installed packages and source
-- packages along with their exact version dependencies. An installation plan
-- is closed if for every package in the set, all of its dependencies are
-- also in the set. It is consistent if for every package in the set, all
-- dependencies which target that package have the same version.
-- Note that plans do not necessarily compose. You might have a valid plan for
-- package A and a valid plan for package B. That does not mean the composition
-- is simultaneously valid for A and B. In particular you're most likely to
-- have problems with inconsistent dependencies.
-- On the other hand it is true that every closed sub plan is valid.
-- | Packages in an install plan
--
-- NOTE: 'ConfiguredPackage', 'GenericReadyPackage' and 'GenericPlanPackage'
-- intentionally have no 'PackageInstalled' instance. This is important:
-- PackageInstalled returns only library dependencies, but for package that
-- aren't yet installed we know many more kinds of dependencies (setup
-- dependencies, exe, test-suite, benchmark, ..). Any functions that operate on
-- dependencies in cabal-install should consider what to do with these
-- dependencies; if we give a 'PackageInstalled' instance it would be too easy
-- to get this wrong (and, for instance, call graph traversal functions from
-- Cabal rather than from cabal-install). Instead, see 'PackageFixedDeps'.
data GenericPlanPackage ipkg srcpkg iresult ifailure
   = PreExisting ipkg
     -- ^ Installed before this plan was constructed.
   | Configured srcpkg
     -- ^ Configured source package, not yet built.
   | Processing (GenericReadyPackage srcpkg)
     -- ^ Currently being built/installed.
   | Installed (GenericReadyPackage srcpkg) (Maybe ipkg) iresult
     -- ^ Built successfully; the @Maybe ipkg@ is the registered package
     -- info when the package produced a library ('Nothing' for
     -- non-library packages), plus the build result.
   | Failed srcpkg ifailure
     -- ^ The build failed (or a dependency of it failed first; see
     -- 'failed').
  deriving (Eq, Show, Generic)
-- | Plan packages form a graph keyed by 'UnitId'; the neighbours of a node
-- are all of its (flattened) dependencies.
instance (HasUnitId ipkg,   PackageFixedDeps ipkg,
          HasUnitId srcpkg, PackageFixedDeps srcpkg)
      => IsNode (GenericPlanPackage ipkg srcpkg iresult ifailure) where
  type Key (GenericPlanPackage ipkg srcpkg iresult ifailure) = UnitId -- TODO: change me
  nodeKey = installedUnitId
  -- Every dependency kind, collapsed into one flat list of unit ids.
  nodeNeighbors = CD.flatDeps . depends

-- | Serialisation via the 'Generic'-derived default methods.
instance (Binary ipkg, Binary srcpkg, Binary iresult, Binary ifailure)
      => Binary (GenericPlanPackage ipkg srcpkg iresult ifailure)

-- | 'GenericPlanPackage' specialised to the concrete types used by
-- cabal-install.
type PlanPackage = GenericPlanPackage
                   InstalledPackageInfo (ConfiguredPackage UnresolvedPkgLoc)
                   BuildSuccess BuildFailure
-- | The package identifier is taken from whichever payload the state holds.
instance (Package ipkg, Package srcpkg) =>
         Package (GenericPlanPackage ipkg srcpkg iresult ifailure) where
  packageId pkg = case pkg of
    PreExisting ipkg     -> packageId ipkg
    Configured  spkg     -> packageId spkg
    Processing  rpkg     -> packageId rpkg
    Installed   rpkg _ _ -> packageId rpkg
    Failed      spkg _   -> packageId spkg
-- | Dependency lookup is delegated to the payload in every state.
instance (PackageFixedDeps srcpkg,
          PackageFixedDeps ipkg) =>
         PackageFixedDeps (GenericPlanPackage ipkg srcpkg iresult ifailure) where
  depends pkg = case pkg of
    PreExisting p     -> depends p
    Configured  p     -> depends p
    Processing  p     -> depends p
    Installed   p _ _ -> depends p
    Failed      p _   -> depends p
-- | The unit id under which a plan package is keyed in the graph.
instance (HasUnitId ipkg, HasUnitId srcpkg) =>
         HasUnitId
         (GenericPlanPackage ipkg srcpkg iresult ifailure) where
  installedUnitId (PreExisting ipkg ) = installedUnitId ipkg
  installedUnitId (Configured  spkg)  = installedUnitId spkg
  installedUnitId (Processing  rpkg)  = installedUnitId rpkg
  -- NB: defer to the actual installed package info in this case.
  -- Clause order matters: this must match before the catch-all below.
  installedUnitId (Installed _ (Just ipkg) _) = installedUnitId ipkg
  installedUnitId (Installed rpkg _        _) = installedUnitId rpkg
  installedUnitId (Failed    spkg          _) = installedUnitId spkg
-- | An install plan: the graph of plan packages together with the flag
-- recording whether goals were solved independently.
data GenericInstallPlan ipkg srcpkg iresult ifailure = GenericInstallPlan {
    planIndex      :: !(PlanIndex ipkg srcpkg iresult ifailure),
    planIndepGoals :: !IndependentGoals
  }

-- | 'GenericInstallPlan' specialised to most commonly used types.
type InstallPlan = GenericInstallPlan
                   InstalledPackageInfo (ConfiguredPackage UnresolvedPkgLoc)
                   BuildSuccess BuildFailure

-- | The graph of plan packages, keyed by 'UnitId' (see the 'IsNode'
-- instance above).
type PlanIndex ipkg srcpkg iresult ifailure =
     Graph (GenericPlanPackage ipkg srcpkg iresult ifailure)
-- | Internal sanity check used in asserts: the plan's graph must be 'valid'.
invariant :: (HasUnitId ipkg, PackageFixedDeps ipkg,
              HasUnitId srcpkg, PackageFixedDeps srcpkg)
          => GenericInstallPlan ipkg srcpkg iresult ifailure -> Bool
invariant plan = valid indepGoals index
  where
    indepGoals = planIndepGoals plan
    index      = planIndex plan
-- | Smart constructor that deals with caching the 'Graph' representation.
--
-- Currently just positional record construction (fields are @planIndex@
-- then @planIndepGoals@, matching the argument order).
mkInstallPlan :: PlanIndex ipkg srcpkg iresult ifailure
              -> IndependentGoals
              -> GenericInstallPlan ipkg srcpkg iresult ifailure
mkInstallPlan = GenericInstallPlan
-- | Abort with a message identifying this module; used when a plan
-- invariant has been broken by a caller.
internalError :: String -> a
internalError msg = error ("InstallPlan: internal error: " ++ msg)
-- | Serialise a plan as the pair of its index and goal-independence flag;
-- deserialisation rebuilds the plan via the smart constructor.
instance (HasUnitId ipkg,   PackageFixedDeps ipkg,
          HasUnitId srcpkg, PackageFixedDeps srcpkg,
          Binary ipkg, Binary srcpkg, Binary iresult, Binary ifailure)
       => Binary (GenericInstallPlan ipkg srcpkg iresult ifailure) where
    put GenericInstallPlan {
              planIndex      = index,
              planIndepGoals = indepGoals
        } = put (index, indepGoals)

    get = do
      (index, indepGoals) <- get
      -- Strict application: force the reconstructed plan during decoding.
      return $! mkInstallPlan index indepGoals
-- | Render every package of an index, one per line, in the form
-- @State package-id (unit-id)@.
showPlanIndex :: (HasUnitId ipkg, HasUnitId srcpkg)
              => PlanIndex ipkg srcpkg iresult ifailure -> String
showPlanIndex index = intercalate "\n" (map render (Graph.toList index))
  where
    render pkg = concat
      [ showPlanPackageTag pkg, " "
      , display (packageId pkg), " ("
      , display (installedUnitId pkg), ")" ]
-- | Render a whole plan; see 'showPlanIndex'.
showInstallPlan :: (HasUnitId ipkg, HasUnitId srcpkg)
                => GenericInstallPlan ipkg srcpkg iresult ifailure -> String
showInstallPlan plan = showPlanIndex (planIndex plan)

-- | A short tag naming the state a plan package is in.
showPlanPackageTag :: GenericPlanPackage ipkg srcpkg iresult ifailure -> String
showPlanPackageTag pkg = case pkg of
  PreExisting _   -> "PreExisting"
  Configured  _   -> "Configured"
  Processing  _   -> "Processing"
  Installed _ _ _ -> "Installed"
  Failed      _ _ -> "Failed"
-- | Build an installation plan from a valid set of resolved packages.
-- Returns the detected 'problems' instead when the index is not valid.
--
new :: (HasUnitId ipkg, PackageFixedDeps ipkg,
        HasUnitId srcpkg, PackageFixedDeps srcpkg)
    => IndependentGoals
    -> PlanIndex ipkg srcpkg iresult ifailure
    -> Either [PlanProblem ipkg srcpkg iresult ifailure]
              (GenericInstallPlan ipkg srcpkg iresult ifailure)
new indepGoals index
  | null probs = Right (mkInstallPlan index indepGoals)
  | otherwise  = Left probs
  where
    probs = problems indepGoals index
-- | All packages in the plan, in the order the underlying graph yields them.
toList :: GenericInstallPlan ipkg srcpkg iresult ifailure
       -> [GenericPlanPackage ipkg srcpkg iresult ifailure]
toList plan = Graph.toList (planIndex plan)
-- | Remove packages from the install plan. This will result in an
-- error if there are remaining packages that depend on any matching
-- package. This is primarily useful for obtaining an install plan for
-- the dependencies of a package or set of packages without actually
-- installing the package itself, as when doing development.
--
remove :: (HasUnitId ipkg, PackageFixedDeps ipkg,
           HasUnitId srcpkg, PackageFixedDeps srcpkg)
       => (GenericPlanPackage ipkg srcpkg iresult ifailure -> Bool)
       -> GenericInstallPlan ipkg srcpkg iresult ifailure
       -> Either [PlanProblem ipkg srcpkg iresult ifailure]
                 (GenericInstallPlan ipkg srcpkg iresult ifailure)
remove shouldRemove plan = new (planIndepGoals plan) keptIndex
  where
    -- Rebuild the index from every surviving package; 'new' re-validates,
    -- so dangling dependencies surface as plan problems.
    keptIndex = Graph.fromList
                  [ pkg | pkg <- toList plan, not (shouldRemove pkg) ]
-- | The packages that are ready to be installed. That is they are in the
-- configured state and have all their dependencies installed already.
-- The plan is complete if the result is @[]@.
--
ready :: forall ipkg srcpkg iresult ifailure.
         (HasUnitId ipkg, PackageFixedDeps ipkg,
          HasUnitId srcpkg, PackageFixedDeps srcpkg)
      => GenericInstallPlan ipkg srcpkg iresult ifailure
      -> [GenericReadyPackage srcpkg]
ready plan = assert check readyPackages
  where
    -- Invariant: if nothing is ready and nothing is in flight, no package
    -- may remain configured (otherwise the install could never finish).
    -- (Was @if c then e else True@; rewritten as the equivalent boolean
    -- implication.)
    check = not (null readyPackages && null processingPackages)
         || null configuredPackages

    configuredPackages = [ pkg | Configured pkg <- toList plan ]
    processingPackages = [ pkg | Processing pkg <- toList plan ]

    -- A configured package is ready when all its deps are installed.
    readyPackages :: [GenericReadyPackage srcpkg]
    readyPackages = catMaybes (map (lookupReadyPackage plan) configuredPackages)
-- | Try to wrap a configured package as ready.  Succeeds ('Just') only
-- when every dependency is already present in the plan as an installed
-- library; yields 'Nothing' while any dependency is still configured or
-- processing.  A missing, failed or non-library dependency indicates a
-- broken plan and aborts via 'internalError'.
lookupReadyPackage :: forall ipkg srcpkg iresult ifailure.
                      (HasUnitId ipkg, PackageFixedDeps ipkg,
                       HasUnitId srcpkg, PackageFixedDeps srcpkg)
                   => GenericInstallPlan ipkg srcpkg iresult ifailure
                   -> srcpkg
                   -> Maybe (GenericReadyPackage srcpkg)
lookupReadyPackage plan pkg = do
    -- Runs in the Maybe monad: any non-installed dependency short-circuits.
    _ <- hasAllInstalledDeps pkg
    return (ReadyPackage pkg)
  where
    -- Traverse the component dependency structure, requiring an installed
    -- package for every unit id.
    hasAllInstalledDeps :: srcpkg -> Maybe (ComponentDeps [ipkg])
    hasAllInstalledDeps = T.mapM (mapM isInstalledDep) . depends

    -- Resolve one dependency to its installed package info, if installed.
    isInstalledDep :: UnitId -> Maybe ipkg
    isInstalledDep pkgid =
      case Graph.lookup pkgid (planIndex plan) of
        Just (PreExisting ipkg)          -> Just ipkg
        Just (Configured  _)             -> Nothing
        Just (Processing  _)             -> Nothing
        Just (Installed _ (Just ipkg) _) -> Just ipkg
        -- The remaining cases mean the plan invariants were violated:
        Just (Installed _ Nothing   _)   -> internalError (depOnNonLib pkgid)
        Just (Failed    _ _)             -> internalError depOnFailed
        Nothing                          -> internalError incomplete

    incomplete  = "install plan is not closed"
    depOnFailed = "configured package depends on failed package"
    depOnNonLib dep = "the configured package "
                   ++ display (packageId pkg)
                   ++ " depends on a non-library package "
                   ++ display dep
-- | Marks packages in the graph as currently processing (e.g. building).
--
-- * The package must exist in the graph and be in the configured state.
--
processing :: forall ipkg srcpkg iresult ifailure.
              (HasUnitId ipkg, PackageFixedDeps ipkg,
               HasUnitId srcpkg, PackageFixedDeps srcpkg)
           => [GenericReadyPackage srcpkg]
           -> GenericInstallPlan ipkg srcpkg iresult ifailure
           -> GenericInstallPlan ipkg srcpkg iresult ifailure
processing pkgs plan = assert (invariant plan') plan'
  where
    -- The new 'Processing' nodes share their keys with the existing
    -- configured nodes; Graph.unionRight is used so the new entries take
    -- effect (assumed right-biased from its name and use in 'failed' —
    -- TODO confirm against Distribution.Compat.Graph).
    plan' = plan {
              planIndex = Graph.unionRight (planIndex plan) processingPkgs
            }
    processingPkgs :: PlanIndex ipkg srcpkg iresult ifailure
    processingPkgs = Graph.fromList [Processing pkg | pkg <- pkgs]
-- | Marks a package in the graph as completed. Also saves the build result
-- for the completed package in the plan.
--
-- * The package must exist in the graph and be in the processing state.
-- * The package must have had no uninstalled dependent packages.
--
completed :: (HasUnitId ipkg, PackageFixedDeps ipkg,
              HasUnitId srcpkg, PackageFixedDeps srcpkg)
          => UnitId
          -> Maybe ipkg -> iresult
          -> GenericInstallPlan ipkg srcpkg iresult ifailure
          -> GenericInstallPlan ipkg srcpkg iresult ifailure
completed pkgid mipkg buildResult plan = assert (invariant plan') plan'
  where
    -- Delete under the old key before inserting: per the HasUnitId
    -- instance, an 'Installed' node with @Just ipkg@ is keyed by the
    -- installed package's unit id, which may differ from @pkgid@.
    plan'     = plan {
                  planIndex = Graph.insert installed
                            . Graph.deleteKey pkgid
                            $ planIndex plan
                }
    installed = Installed (lookupProcessingPackage plan pkgid) mipkg buildResult
-- | Marks a package in the graph as having failed. It also marks all the
-- packages that depended on it as having failed.
--
-- * The package must exist in the graph and be in the processing
-- state.
--
failed :: (HasUnitId ipkg, PackageFixedDeps ipkg,
           HasUnitId srcpkg, PackageFixedDeps srcpkg)
       => UnitId   -- ^ The id of the package that failed to install
       -> ifailure -- ^ The build result to use for the failed package
       -> ifailure -- ^ The build result to use for its dependencies
       -> GenericInstallPlan ipkg srcpkg iresult ifailure
       -> GenericInstallPlan ipkg srcpkg iresult ifailure
failed pkgid buildResult buildResult' plan = assert (invariant plan') plan'
  where
    -- NB: failures don't update IPIDs
    plan' = plan {
              planIndex = Graph.unionRight (planIndex plan) failures
            }
    -- The package itself must currently be processing ...
    ReadyPackage srcpkg = lookupProcessingPackage plan pkgid
    -- ... it fails with @buildResult@, and every reverse dependency that
    -- is still in the configured state fails with @buildResult'@
    -- (already-failed dependants are skipped by 'checkConfiguredPackage').
    failures = Graph.fromList
             $ Failed srcpkg buildResult
             : [ Failed pkg' buildResult'
               | Just pkg' <- map checkConfiguredPackage
                            $ packagesThatDependOn plan pkgid ]
-- | Lookup the reachable packages in the reverse dependency graph.
-- Does NOT include the package for @pkgid@!
--
packagesThatDependOn :: (HasUnitId ipkg, HasUnitId srcpkg)
                     => GenericInstallPlan ipkg srcpkg iresult ifailure
                     -> UnitId
                     -> [GenericPlanPackage ipkg srcpkg iresult ifailure]
packagesThatDependOn plan pkgid =
    [ pkg | pkg <- closure, installedUnitId pkg /= pkgid ]
  where
    -- An unknown key yields no closure at all.
    closure = maybe [] id (Graph.revClosure (planIndex plan) [pkgid])
-- | Lookup a package that we expect to be in the processing state.
--
-- Aborts via 'internalError' when the package is missing or in any other
-- state, since that means a plan invariant was broken by the caller.
lookupProcessingPackage :: (PackageFixedDeps ipkg, PackageFixedDeps srcpkg,
                            HasUnitId ipkg, HasUnitId srcpkg)
                        => GenericInstallPlan ipkg srcpkg iresult ifailure
                        -> UnitId
                        -> GenericReadyPackage srcpkg
lookupProcessingPackage plan pkgid =
  case Graph.lookup pkgid (planIndex plan) of
    Just (Processing pkg) -> pkg
    _ -> internalError $ "not in processing state or no such pkg " ++
                         display pkgid
-- | Check a package that we expect to be in the configured or failed
-- state: configured yields its payload, failed yields 'Nothing', and any
-- other state aborts via 'internalError'.
checkConfiguredPackage :: (Package srcpkg, Package ipkg)
                       => GenericPlanPackage ipkg srcpkg iresult ifailure
                       -> Maybe srcpkg
checkConfiguredPackage planpkg = case planpkg of
  Configured pkg -> Just pkg
  Failed _ _     -> Nothing
  other          -> internalError
                      ("not configured or no such pkg " ++ display (packageId other))
-- | Replace a ready package with a pre-existing one. The pre-existing one
-- must have exactly the same dependencies as the source one was configured
-- with.
--
preexisting :: (HasUnitId ipkg, PackageFixedDeps ipkg,
                HasUnitId srcpkg, PackageFixedDeps srcpkg)
            => UnitId
            -> ipkg
            -> GenericInstallPlan ipkg srcpkg iresult ifailure
            -> GenericInstallPlan ipkg srcpkg iresult ifailure
preexisting pkgid ipkg plan = assert (invariant plan') plan'
  where
    plan' = plan {
              -- The inserted node is keyed by @ipkg@'s own unit id ...
              planIndex = Graph.insert (PreExisting ipkg)
                          -- ...but be sure to use the *old* IPID for the
                          -- lookup for the preexisting record
                        . Graph.deleteKey pkgid
                        $ planIndex plan
            }
-- | Replace a ready package with an installed one. The installed one
-- must have exactly the same dependencies as the source one was configured
-- with.
--
preinstalled :: (HasUnitId ipkg, PackageFixedDeps ipkg,
                 HasUnitId srcpkg, PackageFixedDeps srcpkg)
             => UnitId
             -> Maybe ipkg -> iresult
             -> GenericInstallPlan ipkg srcpkg iresult ifailure
             -> GenericInstallPlan ipkg srcpkg iresult ifailure
preinstalled pkgid mipkg buildResult plan = assert (invariant plan') plan'
  where
    plan' = plan { planIndex = Graph.insert installed (planIndex plan) }
    -- Irrefutable 'Just' pattern: if @pkgid@ is not present as a
    -- configured package with all deps installed, forcing @installed@
    -- raises a pattern-match failure (surfaced by the assert above).
    Just installed = do
      Configured pkg <- Graph.lookup pkgid (planIndex plan)
      rpkg <- lookupReadyPackage plan pkg
      return (Installed rpkg mipkg buildResult)
-- ------------------------------------------------------------
-- * Checking validity of plans
-- ------------------------------------------------------------

-- | A valid installation plan is a set of packages that is 'acyclic',
-- 'closed' and 'consistent'. Also, every 'ConfiguredPackage' in the
-- plan has to have a valid configuration (see 'configuredPackageValid').
--
-- * if the result is @False@ use 'problems' to get a detailed list.
--
valid :: (HasUnitId ipkg, PackageFixedDeps ipkg,
          HasUnitId srcpkg, PackageFixedDeps srcpkg)
      => IndependentGoals
      -> PlanIndex ipkg srcpkg iresult ifailure
      -> Bool
valid indepGoals index = null (problems indepGoals index)
-- | The kinds of inconsistency a plan index can exhibit; see 'problems'.
data PlanProblem ipkg srcpkg iresult ifailure =
     PackageMissingDeps (GenericPlanPackage ipkg srcpkg iresult ifailure)
                        [PackageIdentifier]
     -- ^ A package whose listed dependencies are not all in the index.
   | PackageCycle [GenericPlanPackage ipkg srcpkg iresult ifailure]
     -- ^ A dependency cycle among the given packages.
   | PackageStateInvalid (GenericPlanPackage ipkg srcpkg iresult ifailure)
                         (GenericPlanPackage ipkg srcpkg iresult ifailure)
     -- ^ A dependency pair whose states violate 'stateDependencyRelation'.
-- | For an invalid plan, produce a detailed list of problems as human readable
-- error messages. This is mainly intended for debugging purposes.
-- Use 'showPlanProblem' for a human readable explanation.
--
problems :: (HasUnitId ipkg, PackageFixedDeps ipkg,
             HasUnitId srcpkg, PackageFixedDeps srcpkg)
         => IndependentGoals
         -> PlanIndex ipkg srcpkg iresult ifailure
         -> [PlanProblem ipkg srcpkg iresult ifailure]
problems _indepGoals index =
     -- Packages with dependencies missing from the index.
     [ PackageMissingDeps pkg
       (catMaybes
        (map
         (fmap packageId . flip Graph.lookup index)
         missingDeps))
     | (pkg, missingDeps) <- Graph.broken index ]

     -- Dependency cycles.
  ++ [ PackageCycle cycleGroup
     | cycleGroup <- Graph.cycles index ]

     -- Dependency pairs whose states are mutually inconsistent.
  ++ [ PackageStateInvalid pkg pkg'
     | pkg <- Graph.toList index
     , Just pkg' <- map (flip Graph.lookup index)
                    (CD.flatDeps (depends pkg))
     , not (stateDependencyRelation pkg pkg') ]
-- | The states of packages that depend on each other must respect this
-- relation. That is, for every case where package @a@ depends on
-- package @b@ we require that @stateDependencyRelation a b = True@.
--
-- Pairs not listed below (e.g. a 'PreExisting' package depending on
-- anything that is not 'PreExisting') fall through to the final 'False'.
stateDependencyRelation :: GenericPlanPackage ipkg srcpkg iresult ifailure
                        -> GenericPlanPackage ipkg srcpkg iresult ifailure
                        -> Bool
stateDependencyRelation (PreExisting _) (PreExisting _)   = True

stateDependencyRelation (Configured  _) (PreExisting _)   = True
stateDependencyRelation (Configured  _) (Configured  _)   = True
stateDependencyRelation (Configured  _) (Processing  _)   = True
stateDependencyRelation (Configured  _) (Installed _ _ _) = True

stateDependencyRelation (Processing  _) (PreExisting _)   = True
stateDependencyRelation (Processing  _) (Installed _ _ _) = True

stateDependencyRelation (Installed _ _ _) (PreExisting _)   = True
stateDependencyRelation (Installed _ _ _) (Installed _ _ _) = True

stateDependencyRelation (Failed _ _) (PreExisting _)   = True
-- failed can depends on configured because a package can depend on
-- several other packages and if one of the deps fail then we fail
-- but we still depend on the other ones that did not fail:
stateDependencyRelation (Failed _ _) (Configured  _)   = True
stateDependencyRelation (Failed _ _) (Processing  _)   = True
stateDependencyRelation (Failed _ _) (Installed _ _ _) = True
stateDependencyRelation (Failed _ _) (Failed _ _)      = True

stateDependencyRelation _ _ = False
-- | The packages of the plan in reverse topological order
-- (via 'Graph.revTopSort' on the plan's index).
reverseTopologicalOrder :: GenericInstallPlan ipkg srcpkg iresult ifailure
                        -> [GenericPlanPackage ipkg srcpkg iresult ifailure]
reverseTopologicalOrder = Graph.revTopSort . planIndex
-- | Convert a 'SolverInstallPlan' into a 'GenericInstallPlan' by mapping
-- each solver package through @f@.  The @(SolverId -> ConfiguredId)@
-- argument handed to @f@ translates a dependency's solver id into the
-- unit id that the already-converted dependency received.
fromSolverInstallPlan ::
      (HasUnitId ipkg,   PackageFixedDeps ipkg,
       HasUnitId srcpkg, PackageFixedDeps srcpkg)
    -- Maybe this should be a UnitId not ConfiguredId?
    => (   (SolverId -> ConfiguredId)
        -> SolverInstallPlan.SolverPlanPackage
        -> GenericPlanPackage ipkg srcpkg iresult ifailure   )
    -> SolverInstallPlan
    -> GenericInstallPlan ipkg srcpkg iresult ifailure
fromSolverInstallPlan f plan =
    mkInstallPlan (Graph.fromList pkgs')
                  (SolverInstallPlan.planIndepGoals plan)
  where
    -- Walk the plan so that dependencies are converted before their
    -- dependants, threading a map from planned package id to the unit id
    -- each converted package received.
    (_, pkgs') = foldl' f' (Map.empty, []) (SolverInstallPlan.reverseTopologicalOrder plan)

    f' (pidMap, pkgs) pkg = (pidMap', pkg' : pkgs)
      where
        pkg' = f (mapDep pidMap) pkg

        pidMap'
          = case sid of
              PreExistingId _pid uid ->
                -- Pre-existing packages must keep their unit id unchanged.
                assert (uid == uid') pidMap
              PlannedId pid ->
                Map.insert pid uid' pidMap
          where
            sid  = nodeKey pkg
            uid' = nodeKey pkg'

    -- Translate a dependency's solver id into a ConfiguredId.
    mapDep _ (PreExistingId pid uid) = ConfiguredId pid uid
    mapDep pidMap (PlannedId pid)
        | Just uid <- Map.lookup pid pidMap
        = ConfiguredId pid uid
        -- This shouldn't happen, since mapDep should only be called
        -- on neighbor SolverId, which must have all been done already
        -- by the reverse top-sort (this also assumes that the graph
        -- is not broken).
        | otherwise
        = error ("fromSolverInstallPlan mapDep: " ++ display pid)
-- | Conversion of 'SolverInstallPlan' to 'InstallPlan'.
-- Similar to 'elaboratedInstallPlan'
configureInstallPlan :: SolverInstallPlan -> InstallPlan
configureInstallPlan solverPlan =
    flip fromSolverInstallPlan solverPlan $ \mapDep planpkg ->
      case planpkg of
        SolverInstallPlan.PreExisting pkg _ ->
          PreExisting pkg

        SolverInstallPlan.Configured pkg ->
          Configured (configureSolverPackage mapDep pkg)
  where
    -- Turn a solver package into a configured package, computing its
    -- component/unit id from its name, flags and library dependencies.
    configureSolverPackage :: (SolverId -> ConfiguredId)
                           -> SolverPackage UnresolvedPkgLoc
                           -> ConfiguredPackage UnresolvedPkgLoc
    configureSolverPackage mapDep spkg =
      ConfiguredPackage {
        confPkgId = SimpleUnitId
                  $ Configure.computeComponentId
                        Cabal.NoFlag
                        (packageId spkg)
                        (PD.CLibName (display (pkgName (packageId spkg))))
                        -- TODO: this is a hack that won't work for Backpack.
                        (map ((\(SimpleUnitId cid0) -> cid0) . confInstId)
                             (CD.libraryDeps deps))
                        (solverPkgFlags spkg),
        confPkgSource  = solverPkgSource spkg,
        confPkgFlags   = solverPkgFlags spkg,
        confPkgStanzas = solverPkgStanzas spkg,
        -- Dependencies translated from solver ids via @mapDep@.
        confPkgDeps    = deps
      }
      where
        deps = fmap (map mapDep) (solverPkgDeps spkg)
| kolmodin/cabal | cabal-install/Distribution/Client/InstallPlan.hs | bsd-3-clause | 26,972 | 0 | 18 | 6,725 | 5,113 | 2,697 | 2,416 | 408 | 7 |
module DroidSignatureFileFilter
( FilterOption(..)
, filterSigFile
, listFileFormats
) where
import Text.XML.Light
import Data.List ((\\), nub, intercalate)
import Data.Maybe (fromMaybe)
-- | Configuration options for the 'filterSigFile' function.
data FilterOption
    -- | Include file formats that are supertypes of the selected formats.
    = WithSupertypes
    -- | Include file formats that are subtypes of the selected formats.
    | WithSubtypes
    -- Eq is needed for the `elem` checks in 'filterSigFile'.
    deriving (Eq)
-- | Filter an XML string representing a DROID signature file based on a list
-- of PUIDs. Only those entries (file format and internal signature
-- descriptions) that are necessary to identify files with one of the given
-- PUIDs will occur in the resulting XML string. With an empty list of PUIDs,
-- the input XML string is returned unmodified.
--
-- NOTE(review): the two 'head' calls assume the document contains an
-- InternalSignatureCollection and a FileFormatCollection element; XML that
-- parses but lacks them crashes via 'head' on an empty list instead of
-- reporting a clean error.
filterSigFile :: [FilterOption] -- ^ Configuration options.
              -> [String]       -- ^ PUIDs.
              -> String         -- ^ Signature file XML content.
              -> String         -- ^ Filtered signature file XML content.
filterSigFile _ [] xml = xml
filterSigFile _ _  "" = ""
filterSigFile opts puids xml = case parseXMLDoc xml of
    Nothing -> error "Failed to parse signature file."
    Just e  -> showTopElement $ replaceChildren e [isc', ffc']
      where
        -- The two top-level collections of a signature file.
        isc = head $ findChildren (mkNm "InternalSignatureCollection") e
        ffc = head $ findChildren (mkNm "FileFormatCollection") e
        -- File formats explicitly selected by PUID.
        ffs = filterChildrenByAttr "PUID" puids ffc
        -- Selected formats plus, optionally, their super-/subtypes.
        -- 'nub' deduplicates via the ID-based Eq Element instance defined
        -- in this module (O(n^2), acceptable for signature-file sizes).
        ffc' = replaceChildren ffc $ nub $ ffs
               ++ (if WithSupertypes `elem` opts
                      then supertypes ffc ffs
                      else [])
               ++ (if WithSubtypes `elem` opts
                      then subtypes ffc ffs
                      else [])
        -- Keep only the internal signatures referenced by kept formats.
        ids  = concatMap sigIDs $ findChildren (mkNm "FileFormat") ffc'
        isc' = replaceChildren isc $ filterChildrenByAttr "ID" ids isc
-- | List the file formats that occur in an XML string representing a DROID
-- signature file. Each file format is represented by a string of this form:
--
-- > PUID <tab> name <tab> version
--
-- Returns @[]@ for empty input and — unlike the original, which crashed
-- via a partial 'head' — also when the document has no
-- FileFormatCollection element.
listFileFormats :: String   -- ^ Signature file XML content.
                -> [String] -- ^ File formats.
listFileFormats "" = []
listFileFormats xml = case parseXMLDoc xml of
    Nothing -> error "Failed to parse signature file."
    Just e  -> case findChildren (mkNm "FileFormatCollection") e of
        []        -> []
        (ffc : _) -> map showFileFormat (findChildren (mkNm "FileFormat") ffc)
  where
    -- Join PUID, name and version with tabs; missing attributes become "".
    showFileFormat ff = intercalate "\t"
        [ fromMaybe "" $ findAttr (unqual a) ff
        | a <- ["PUID", "Name", "Version"] ]
-- | Replace all content of an element by appending a list of elements as
-- children.
replaceChildren :: Element -> [Element] -> Element
replaceChildren e cs = e { elContent = [ Elem c | c <- cs ] }

-- | Filter the children of an element based on an attribute name and a list
-- of attribute values. Only those children that have an attribute with the
-- specified name and a value from the specified list will occur in the
-- resulting element list.
filterChildrenByAttr :: String    -- ^ Attribute name.
                     -> [String]  -- ^ Attribute values.
                     -> Element   -- ^ Parent element.
                     -> [Element] -- ^ Filtered child elements.
filterChildrenByAttr a vs =
    filterChildren (\c -> case findAttr (unqual a) c of
                            Just v  -> v `elem` vs
                            Nothing -> False)
-- | Find the file formats in a given file format collection that are (direct
-- or indirect) supertypes of one of the given elements.
supertypes :: Element   -- ^ FileFormatCollection element.
           -> [Element] -- ^ FileFormat elements.
           -> [Element] -- ^ FileFormat elements (supertypes).
supertypes = related isSupertypeOf

-- | Find the file formats in a given file format collection that are (direct
-- or indirect) subtypes of one of the given elements.
subtypes :: Element   -- ^ FileFormatCollection element.
         -> [Element] -- ^ FileFormat elements.
         -> [Element] -- ^ FileFormat elements (subtypes).
subtypes = related isSubtypeOf
-- | Find the file formats in a given file format collection that are
-- (directly or indirectly) related to one of the given file formats with
-- respect to a given predicate.
--
-- Implemented as a fixed-point iteration: each round adds every format
-- related to something found in the previous round.  Termination is
-- guaranteed because each round only adds formats not yet accumulated
-- (the @\\ acc@).  The starting elements themselves are not included
-- unless rediscovered through the relation.
--
-- Fixes: the original named its predicate @pred@ (shadowing
-- 'Prelude.pred') and re-bound @ffc@/@es@ in the inner loop, shadowing
-- the outer parameters.
related :: (Element -> Element -> Bool) -- ^ Predicate that determines whether
                                        -- two elements are related.
        -> Element   -- ^ FileFormatCollection element.
        -> [Element] -- ^ FileFormat elements.
        -> [Element] -- ^ FileFormat elements.
related rel ffc start = go start []
  where
    -- @frontier@: formats found in the previous round; @acc@: all found.
    go []       acc = acc
    go frontier acc = go next (acc ++ next)
      where next = filterChildren (\ff -> any (rel ff) frontier) ffc \\ acc
-- | The value of a FileFormat element's ID attribute, or @""@ when the
-- element has no ID attribute.
fmtID :: Element -- ^ FileFormat element.
      -> String  -- ^ File format ID attribute value.
fmtID e = fromMaybe "" (findAttr (unqual "ID") e)

-- | All internal signature IDs referenced by a FileFormat element.
sigIDs :: Element  -- ^ FileFormat element.
       -> [String] -- ^ Internal signature IDs.
sigIDs e = map strContent (findChildren (mkNm "InternalSignatureID") e)

-- | The file format IDs of all immediate supertypes of a FileFormat
-- element (its HasPriorityOverFileFormatID children).
supFmtIDs :: Element  -- ^ FileFormat element.
          -> [String] -- ^ Supertype format IDs.
supFmtIDs e = map strContent (findChildren (mkNm "HasPriorityOverFileFormatID") e)
-- | Check if a file format x is a supertype of another file format y, i.e.,
-- if the relation y HasPriorityOverFileFormatID x holds.
isSupertypeOf :: Element -- ^ FileFormat element x.
              -> Element -- ^ FileFormat element y.
              -> Bool
isSupertypeOf x y = elem (fmtID x) (supFmtIDs y)

-- | Check if a file format x is a subtype of another file format y, i.e., if
-- the relation x HasPriorityOverFileFormatID y holds.
isSubtypeOf :: Element -- ^ FileFormat element x.
            -> Element -- ^ FileFormat element y.
            -> Bool
x `isSubtypeOf` y = y `isSupertypeOf` x
-- | The DROID signature file namespace string.
--
-- Fixes: added the missing top-level type signature
-- (-Wmissing-signatures).
sfNamespace :: String
sfNamespace = "http://www.nationalarchives.gov.uk/pronom/SignatureFile"
-- | Create a QName for a given element name based on the DROID signature file
-- namespace.  No prefix is attached.
mkNm :: String -> QName
mkNm n = QName n (Just sfNamespace) Nothing

-- | Test two elements for equality. N.B., in this context two elements are
-- considered equal if their ID attributes have the same value. If one or both
-- (!) elements do not have an ID attribute they are not considered equal.
--
-- NOTE(review): this is an orphan instance, and it is deliberately not
-- reflexive: an element without an ID compares unequal even to itself, so
-- Eq-based list functions ('nub', '\\') treat ID-less elements as always
-- distinct.  'filterSigFile' relies on that behaviour; do not "fix" it
-- without auditing the callers.
instance Eq Element where
    x == y =
        case map (findAttr (unqual "ID")) [x,y] of
            [Nothing, Nothing] -> False
            [xID, yID]         -> xID == yID
| marhop/droidsfmin | src/DroidSignatureFileFilter.hs | bsd-3-clause | 7,176 | 0 | 15 | 1,952 | 1,163 | 641 | 522 | 94 | 4 |
module Parser.ParseType
(parseType
,parseArgDefInterface)
where
import Parser.Syntax
import Parser.Parse
import Parser.ParseIdent
import Parser.ParseSpace
-- | A bare identifier names an opaque type.
parseOpaqueType :: Parse Char [Type]
parseOpaqueType = greedy $
    parseUnresolvedIdent >>= \name -> return [OpaqueType name]

-- | @()@ — possibly with interior whitespace — is the void type.
parseVoidType :: Parse Char [Type]
parseVoidType = greedy $
    lit '(' >> optional kspace >> lit ')' >> return [VoidType]

-- | @$name@ refers to a template parameter.
parseTemplateParameterType :: Parse Char [Type]
parseTemplateParameterType = greedy $
    lit '$' >> parseLocalIdent >>= \name -> return [TemplateParameterType name]
-- | One or more comma-separated types in parentheses.
parseParenthesizedTypes :: Parse Char [Type]
parseParenthesizedTypes = kparenthesized parseType

-- | One argument in a function type: an optional @static@ marker, an
-- optional @#name:@ label, and the argument's type.
parseArgDefInterface :: Parse Char ArgumentDefInterface
parseArgDefInterface = do
    staticMode <- parseEither
        (lits "static" >> kspace >> return StaticArg)
        (return RuntimeArg)
    -- Optional "#name :" label before the type.
    maybeName <- optional $ do
        lit '#'
        optional kspace
        name <- parseLocalIdent
        optional kspace
        lit ':'
        optional kspace
        return name
    t <- parseType
    return $ ArgumentDefInterface staticMode maybeName t

-- | Parenthesized, comma-separated argument list of a function type.
parseFunctionTypeArgs :: Parse Char [ArgumentDefInterface]
parseFunctionTypeArgs = kparenthesized parseArgDefInterface

-- | @func (args) -> ret@.
parseFunctionType :: Parse Char [Type]
parseFunctionType = greedy $ do
    lits "func"
    optional kspace
    args <- parseFunctionTypeArgs
    optional kspace
    lits "->"
    optional kspace
    ret <- parseType
    return [FunctionType args ret]

-- | A type with no postfix operators applied to it yet.
parseAtomicType :: Parse Char [Type]
parseAtomicType = choice
    [parseOpaqueType
    ,parseVoidType
    ,parseTemplateParameterType
    ,parseParenthesizedTypes
    ,parseFunctionType]
-- | Postfix template application: the types parsed so far become the
-- arguments of a named template.
parseTemplateType :: [Type] -> Parse Char [Type]
parseTemplateType types = do
    optional kspace
    name <- parseUnresolvedIdent
    return [TemplateType name types]

-- | Postfix @[]@: sugar for a @List@ of a single element type.
parseListType :: [Type] -> Parse Char [Type]
parseListType [t] = greedy $ do
    optional kspace
    lit '['
    optional kspace
    lit ']'
    return [TemplateType (makeUnresolvedIdent "List") [t]]
-- Only a single preceding type can be made into a list.
parseListType _ = parseFailure

-- | Postfix @[keyType]@: sugar for a @Dict@ of key and value types.
parseDictType :: [Type] -> Parse Char [Type]
parseDictType [valType] = greedy $ do
    optional kspace
    lit '['
    optional kspace
    keyType <- parseType
    optional kspace
    lit ']'
    return $ [TemplateType (makeUnresolvedIdent "Dict") [keyType, valType]]
parseDictType _ = parseFailure

-- | Build a postfix-operator parser from a unary type constructor and the
-- character denoting it (e.g. @!@, @?@, @&@ in 'parseType').
unaryKindParser :: (Type -> Type) -> Char -> [Type] -> Parse Char [Type]
unaryKindParser f char [t] = do
    optional kspace
    lit char
    return [f t]
-- Unary kinds apply to exactly one preceding type.
unaryKindParser _ _ _ = parseFailure
-- | A complete type: an atomic type followed by any chain of postfix
-- operators (template application, list/dict sugar, @!@ reference,
-- @?@ nullable, @&@ pointer).  Fails unless the chain resolves to exactly
-- one type.
parseType :: Parse Char Type
parseType = do
    types <- chainNest parseAtomicType
        [parseTemplateType
        ,parseListType
        ,parseDictType
        ,unaryKindParser ReferenceType '!'
        ,unaryKindParser NullableType '?'
        ,unaryKindParser PointerType '&']
    case types of
        [t] -> return t
        _   -> parseFailure
| Kiwi-Labs/kwirc | kwick/parser/ParseType.hs | mit | 2,709 | 10 | 12 | 424 | 887 | 423 | 464 | 99 | 2 |
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE TypeSynonymInstances #-}
-- |
-- Module: Options
-- License: MIT
--
-- The @options@ package lets library and application developers easily work
-- with command-line options.
--
-- The following example is a full program that can accept two options,
-- @--message@ and @--quiet@:
--
-- @
--import Control.Applicative
--import Options
--
--data MainOptions = MainOptions
-- { optMessage :: String
-- , optQuiet :: Bool
-- }
--
--instance 'Options' MainOptions where
-- 'defineOptions' = pure MainOptions
-- \<*\> 'simpleOption' \"message\" \"Hello world!\"
-- \"A message to show the user.\"
-- \<*\> 'simpleOption' \"quiet\" False
-- \"Whether to be quiet.\"
--
--main :: IO ()
--main = 'runCommand' $ \\opts args -> do
-- if optQuiet opts
-- then return ()
-- else putStrLn (optMessage opts)
-- @
--
-- >$ ./hello
-- >Hello world!
-- >$ ./hello --message='ciao mondo'
-- >ciao mondo
-- >$ ./hello --quiet
-- >$
--
-- In addition, this library will automatically create documentation options
-- such as @--help@ and @--help-all@:
--
-- >$ ./hello --help
-- >Help Options:
-- > -h, --help
-- > Show option summary.
-- > --help-all
-- > Show all help options.
-- >
-- >Application Options:
-- > --message :: text
-- > A message to show the user.
-- > default: "Hello world!"
-- > --quiet :: bool
-- > Whether to be quiet.
-- > default: false
module Options
(
-- * Defining options
Options(..)
, defaultOptions
, simpleOption
, DefineOptions
, SimpleOptionType(..)
-- * Defining subcommands
, Subcommand
, subcommand
-- * Running main with options
, runCommand
, runSubcommand
-- * Parsing argument lists
, Parsed
, parsedError
, parsedHelp
-- ** Parsing options
, ParsedOptions
, parsedOptions
, parsedArguments
, parseOptions
-- ** Parsing sub-commands
, ParsedSubcommand
, parsedSubcommand
, parseSubcommand
-- * Advanced option definitions
, OptionType
, defineOption
, Option
, optionShortFlags
, optionLongFlags
, optionDefault
, optionDescription
, optionGroup
-- ** Option groups
, Group
, group
, groupName
, groupTitle
, groupDescription
-- * Option types
, optionType_bool
, optionType_string
, optionType_int
, optionType_int8
, optionType_int16
, optionType_int32
, optionType_int64
, optionType_word
, optionType_word8
, optionType_word16
, optionType_word32
, optionType_word64
, optionType_integer
, optionType_float
, optionType_double
, optionType_maybe
, optionType_list
, optionType_set
, optionType_map
, optionType_enum
-- ** Custom option types
, optionType
, optionTypeName
, optionTypeDefault
, optionTypeParse
, optionTypeShow
, optionTypeUnary
, optionTypeMerge
) where
import Control.Applicative
import Control.Monad (forM_)
import Control.Monad.Error (ErrorT, runErrorT, throwError)
import Control.Monad.IO.Class (liftIO, MonadIO)
import Data.Functor.Identity
import Data.Int
import Data.List (intercalate)
import qualified Data.Map as Map
import Data.Maybe (isJust)
import qualified Data.Set as Set
import Data.Word
import qualified System.Environment
import System.Exit (exitFailure, exitSuccess)
import System.IO (hPutStr, hPutStrLn, stderr, stdout)
import Options.Help
import Options.Tokenize
import Options.Types
import Options.Util (mapEither)
-- | Options are defined together in a single data type, which will be an
-- instance of 'Options'.
--
-- See 'defineOptions' for details on defining instances of 'Options'.
class Options opts where
-- | Defines the structure and metadata of the options in this type,
-- including their types, flag names, and documentation.
--
-- Options with a basic type and a single flag name may be defined
-- with 'simpleOption'. Options with more complex requirements may
-- be defined with 'defineOption'.
--
-- Non-option fields in the type may be set using applicative functions
-- such as 'pure'.
--
-- Options may be included from another type by using a nested call to
-- 'defineOptions'.
--
-- Library authors are encouraged to aggregate their options into a
-- few top-level types, so application authors can include it
-- easily in their own option definitions.
defineOptions :: DefineOptions opts
data DefineOptions a = DefineOptions a (Integer -> (Integer, [OptionInfo])) (Integer -> Map.Map OptionKey [Token] -> Either String (Integer, a))
instance Functor DefineOptions where
    -- Map over the default value and over the parser's success result;
    -- the info collector and any parse error are left untouched.
    fmap fn (DefineOptions defaultValue getInfo parse) = DefineOptions
        (fn defaultValue)
        getInfo
        (\key tokens -> fmap (\(key', a) -> (key', fn a)) (parse key tokens))
instance Applicative DefineOptions where
    -- A pure value allocates no option keys and always parses successfully.
    pure a = DefineOptions a (\key -> (key, [])) (\key _ -> Right (key, a))
    -- Sequencing threads the integer key counter left-to-right through
    -- both sides, once when collecting 'OptionInfo's and again when
    -- parsing, so each defined option sees the same generated key in
    -- both passes.
    (DefineOptions acc_default acc_getInfo acc_parse) <*> (DefineOptions defaultValue getInfo parse) = DefineOptions
        (acc_default defaultValue)
        (\key -> case acc_getInfo key of
            (key', infos) -> case getInfo key' of
                (key'', infos') -> (key'', infos ++ infos'))
        -- Parsing short-circuits on the first Left, like the Either monad.
        (\key tokens -> case acc_parse key tokens of
            Left err -> Left err
            Right (key', fn) -> case parse key' tokens of
                Left err -> Left err
                Right (key'', a) -> Right (key'', fn a))
-- | An options value containing only the default values for each option.
-- This is equivalent to the options value when parsing an empty argument
-- list.
-- | The default value carried by this type's 'defineOptions'; equivalent
-- to parsing an empty argument list.
defaultOptions :: Options opts => opts
defaultOptions = def where
    DefineOptions def _ _ = defineOptions
-- | An option's type determines how the option will be parsed, and which
-- Haskell type the parsed value will be stored as. There are many types
-- available, covering most basic types and a few more advanced types.
data OptionType val = OptionType
{
-- | The name of this option type; used in @--help@ output.
optionTypeName :: String
-- | The default value for options of this type. This will be used
-- if 'optionDefault' is not set when defining the option.
, optionTypeDefault :: val
-- | Try to parse the given string to an option value. If parsing
-- fails, an error message will be returned.
, optionTypeParse :: String -> Either String val
-- | Format the value for display; used in @--help@ output.
, optionTypeShow :: val -> String
-- | If not Nothing, then options of this type may be set by a unary
-- flag. The option will be parsed as if the given value were set.
, optionTypeUnary :: Maybe val
-- | If not Nothing, then options of this type may be set with repeated
-- flags. Each flag will be parsed with 'optionTypeParse', and the
-- resulting parsed values will be passed to this function for merger
-- into the final value.
, optionTypeMerge :: Maybe ([val] -> val)
}
-- | Define an option group with the given name and title. Use
-- 'groupDescription' to add additional descriptive text, if needed.
group :: String -- ^ Name
-> String -- ^ Title; see 'groupTitle'.
-> String -- ^ Description; see 'groupDescription'.
-> Group
group = Group
-- | Define a new option type with the given name, default, and behavior.
optionType :: String -- ^ Name
-> val -- ^ Default value
-> (String -> Either String val) -- ^ Parser
-> (val -> String) -- ^ Formatter
-> OptionType val
optionType name def parse show' = OptionType name def parse show' Nothing Nothing
class SimpleOptionType a where
simpleOptionType :: OptionType a
instance SimpleOptionType Bool where
simpleOptionType = optionType_bool
-- | Store an option as a @'Bool'@. The option's value must be either
-- @\"true\"@ or @\"false\"@.
--
-- Boolean options are unary, which means that their value is optional when
-- specified on the command line. If a flag is present, the option is set to
-- True.
--
-- >$ ./app -q
-- >$ ./app --quiet
--
-- Boolean options may still be specified explicitly by using long flags with
-- the @--flag=value@ format. This is the only way to set a unary flag to
-- @\"false\"@.
--
-- >$ ./app --quiet=true
-- >$ ./app --quiet=false
optionType_bool :: OptionType Bool
optionType_bool = (optionType "bool" False parseBool (\x -> if x then "true" else "false"))
{ optionTypeUnary = Just True
}
-- | Accept exactly the strings \"true\" and \"false\"; reject anything
-- else with a message naming the offending input.
parseBool :: String -> Either String Bool
parseBool "true" = Right True
parseBool "false" = Right False
parseBool s = Left (show s ++ " is not in {\"true\", \"false\"}.")
instance SimpleOptionType String where
simpleOptionType = optionType_string
-- | Store an option value as a @'String'@. The value is decoded to Unicode
-- first, if needed. The value may contain non-Unicode bytes, in which case
-- they will be stored using GHC 7.4's encoding for mixed-use strings.
optionType_string :: OptionType String
optionType_string = optionType "text" "" Right show
instance SimpleOptionType Integer where
simpleOptionType = optionType_integer
-- | Store an option as an @'Integer'@. The option value must be an integer.
-- There is no minimum or maximum value.
optionType_integer :: OptionType Integer
optionType_integer = optionType "integer" 0 parseInteger show
-- | Parse a decimal integer with an optional leading minus sign.
--
-- Bug fix: the previous version accepted a bare \"-\" — @allDigits \"\"@
-- is vacuously True — and then crashed inside 'read'. A sign must now be
-- followed by at least one digit.
parseInteger :: String -> Either String Integer
parseInteger s = parsed where
    parsed = if valid
        then Right (read s)
        else Left (show s ++ " is not an integer.")
    valid = case s of
        [] -> False
        '-':s' -> not (null s') && allDigits s'
        _ -> allDigits s
    allDigits = all (\c -> c >= '0' && c <= '9')
-- | Parse an integer and check that it lies within the 'Bounded' range of
-- the target type; the @label@ names that type in the error message.
parseBoundedIntegral :: (Bounded a, Integral a) => String -> String -> Either String a
parseBoundedIntegral label = parse where
    -- getBounds ignores its function argument; passing 'parse' to it ties
    -- the @a@ in minBound/maxBound to the @a@ that 'parse' returns,
    -- without needing ScopedTypeVariables.
    getBounds :: (Bounded a, Integral a) => (String -> Either String a) -> a -> a -> (Integer, Integer)
    getBounds _ min' max' = (toInteger min', toInteger max')
    (minInt, maxInt) = getBounds parse minBound maxBound
    parse s = case parseInteger s of
        Left err -> Left err
        Right int -> if int < minInt || int > maxInt
            then Left (show int ++ " is not within bounds [" ++ show minInt ++ ":" ++ show maxInt ++ "] of type " ++ label ++ ".")
            else Right (fromInteger int)
optionTypeBoundedInt :: (Bounded a, Integral a, Show a) => String -> OptionType a
optionTypeBoundedInt tName = optionType tName 0 (parseBoundedIntegral tName) show
instance SimpleOptionType Int where
simpleOptionType = optionType_int
-- | Store an option as an @'Int'@. The option value must be an integer /n/
-- such that @'minBound' <= n <= 'maxBound'@.
optionType_int :: OptionType Int
optionType_int = optionTypeBoundedInt "int"
instance SimpleOptionType Int8 where
simpleOptionType = optionType_int8
-- | Store an option as an @'Int8'@. The option value must be an integer /n/
-- such that @'minBound' <= n <= 'maxBound'@.
optionType_int8 :: OptionType Int8
optionType_int8 = optionTypeBoundedInt "int8"
instance SimpleOptionType Int16 where
simpleOptionType = optionType_int16
-- | Store an option as an @'Int16'@. The option value must be an integer /n/
-- such that @'minBound' <= n <= 'maxBound'@.
optionType_int16 :: OptionType Int16
optionType_int16 = optionTypeBoundedInt "int16"
instance SimpleOptionType Int32 where
simpleOptionType = optionType_int32
-- | Store an option as an @'Int32'@. The option value must be an integer /n/
-- such that @'minBound' <= n <= 'maxBound'@.
optionType_int32 :: OptionType Int32
optionType_int32 = optionTypeBoundedInt "int32"
instance SimpleOptionType Int64 where
simpleOptionType = optionType_int64
-- | Store an option as an @'Int64'@. The option value must be an integer /n/
-- such that @'minBound' <= n <= 'maxBound'@.
optionType_int64 :: OptionType Int64
optionType_int64 = optionTypeBoundedInt "int64"
instance SimpleOptionType Word where
simpleOptionType = optionType_word
-- | Store an option as a @'Word'@. The option value must be a positive
-- integer /n/ such that @0 <= n <= 'maxBound'@.
optionType_word :: OptionType Word
optionType_word = optionTypeBoundedInt "uint"
instance SimpleOptionType Word8 where
simpleOptionType = optionType_word8
-- | Store an option as a @'Word8'@. The option value must be a positive
-- integer /n/ such that @0 <= n <= 'maxBound'@.
optionType_word8 :: OptionType Word8
optionType_word8 = optionTypeBoundedInt "uint8"
instance SimpleOptionType Word16 where
simpleOptionType = optionType_word16
-- | Store an option as a @'Word16'@. The option value must be a positive
-- integer /n/ such that @0 <= n <= 'maxBound'@.
optionType_word16 :: OptionType Word16
optionType_word16 = optionTypeBoundedInt "uint16"
instance SimpleOptionType Word32 where
simpleOptionType = optionType_word32
-- | Store an option as a @'Word32'@. The option value must be a positive
-- integer /n/ such that @0 <= n <= 'maxBound'@.
optionType_word32 :: OptionType Word32
optionType_word32 = optionTypeBoundedInt "uint32"
instance SimpleOptionType Word64 where
simpleOptionType = optionType_word64
-- | Store an option as a @'Word64'@. The option value must be a positive
-- integer /n/ such that @0 <= n <= 'maxBound'@.
optionType_word64 :: OptionType Word64
optionType_word64 = optionTypeBoundedInt "uint64"
instance SimpleOptionType Float where
simpleOptionType = optionType_float
-- | Store an option as a @'Float'@. The option value must be a number. Due to
-- the imprecision of floating-point math, the stored value might not exactly
-- match the user's input. If the user's input is out of range for the
-- @'Float'@ type, it will be stored as @Infinity@ or @-Infinity@.
optionType_float :: OptionType Float
optionType_float = optionType "float32" 0 parseFloat show
instance SimpleOptionType Double where
simpleOptionType = optionType_double
-- | Store an option as a @'Double'@. The option value must be a number. Due to
-- the imprecision of floating-point math, the stored value might not exactly
-- match the user's input. If the user's input is out of range for the
-- @'Double'@ type, it will be stored as @Infinity@ or @-Infinity@.
optionType_double :: OptionType Double
optionType_double = optionType "float64" 0 parseFloat show
parseFloat :: Read a => String -> Either String a
parseFloat s = case reads s of
[(x, "")] -> Right x
_ -> Left (show s ++ " is not a number.")
instance SimpleOptionType a => SimpleOptionType (Maybe a) where
simpleOptionType = optionType_maybe simpleOptionType
-- | Store an option as a @'Maybe'@ of another type. The value will be
-- @Nothing@ if the option is set to an empty string.
optionType_maybe :: OptionType a -> OptionType (Maybe a)
optionType_maybe t = maybeT { optionTypeUnary = unary } where
    maybeT = optionType name Nothing (parseMaybe t) (showMaybe t)
    name = "maybe<" ++ optionTypeName t ++ ">"
    -- If the element type supports unary flags, so does the wrapper; the
    -- element's unary value is simply wrapped in Just.
    unary = case optionTypeUnary t of
        Nothing -> Nothing
        Just val -> Just (Just val)
-- | Parse an element value, treating the empty string as Nothing.
parseMaybe :: OptionType val -> String -> Either String (Maybe val)
parseMaybe _ "" = Right Nothing
parseMaybe t s = fmap Just (optionTypeParse t s)
-- | Format a Maybe value; Nothing renders as the empty string.
showMaybe :: OptionType val -> Maybe val -> String
showMaybe t = maybe "" (optionTypeShow t)
-- | Store an option as a @'Set.Set'@, using another option type for the
-- elements. The separator should be a character that will not occur within
-- the values, such as a comma or semicolon.
--
-- Duplicate elements in the input are permitted.
optionType_set :: Ord a
=> Char -- ^ Element separator
-> OptionType a -- ^ Element type
-> OptionType (Set.Set a)
optionType_set sep t = optionType name Set.empty parseSet showSet where
    name = "set<" ++ optionTypeName t ++ ">"
    -- Parse the separated chunks as elements, then collect into a set
    -- (duplicates are silently merged).
    parseSet raw = fmap Set.fromList (parseList (optionTypeParse t) (split sep raw))
    showSet = intercalate [sep] . map (optionTypeShow t) . Set.toList
-- | Store an option as a 'Map.Map', using other option types for the keys and
-- values.
--
-- The item separator is used to separate key/value pairs from each other. It
-- should be a character that will not occur within either the keys or values.
--
-- The value separator is used to separate the key from the value. It should
-- be a character that will not occur within the keys. It may occur within the
-- values.
--
-- Duplicate keys in the input are permitted. The final value for each key is
-- stored.
optionType_map :: Ord k
    => Char -- ^ Item separator
    -> Char -- ^ Key/Value separator
    -> OptionType k -- ^ Key type
    -> OptionType v -- ^ Value type
    -> OptionType (Map.Map k v)
optionType_map itemSep keySep kt vt = optionType name Map.empty parser showMap where
    name = "map<" ++ optionTypeName kt ++ "," ++ optionTypeName vt ++ ">"
    -- Split into items first, then each item into key/value via parseMap.
    parser s = parseMap keySep (optionTypeParse kt) (optionTypeParse vt) (split itemSep s)
    showMap m = intercalate [itemSep] (map showItem (Map.toList m))
    showItem (k, v) = optionTypeShow kt k ++ [keySep] ++ optionTypeShow vt v
-- | Parse each string in a list, stopping at the first failure.
--
-- This is exactly 'mapM' (i.e. 'traverse') in the 'Either' monad; the
-- previous hand-rolled recursion is replaced with the standard
-- combinator, which has identical short-circuit behavior.
parseList :: (String -> Either String a) -> [String] -> Either String [a]
parseList = mapM
-- | Parse separated @key<sep>value@ items into a map. On duplicate keys
-- the later item wins ('Map.fromList' keeps the last binding).
parseMap :: Ord k => Char -> (String -> Either String k) -> (String -> Either String v) -> [String] -> Either String (Map.Map k v)
parseMap keySep pKey pVal strs = fmap Map.fromList (parseList pItem strs) where
    -- An item with no separator at all has no value and is rejected.
    pItem s = case break (== keySep) s of
        (_, []) -> Left ("Map item " ++ show s ++ " has no value.")
        (sKey, _ : sVal) -> do
            key <- pKey sKey
            val <- pVal sVal
            Right (key, val)
-- | Split a string on a separator character. The empty input yields no
-- chunks; separators at the edges yield empty chunks.
split :: Char -> String -> [String]
split _ [] = []
split sep s0 = go s0 where
    go s = case break (== sep) s of
        (chunk, []) -> [chunk]
        (chunk, _ : rest) -> chunk : go rest
-- | Store an option as a list, using another option type for the elements.
-- The separator should be a character that will not occur within the values,
-- such as a comma or semicolon.
optionType_list :: Char -- ^ Element separator
    -> OptionType a -- ^ Element type
    -> OptionType [a]
optionType_list sep t = optionType name [] parser shower where
    name = "list<" ++ optionTypeName t ++ ">"
    -- Split on the separator, then parse each chunk as an element.
    parser = parseList (optionTypeParse t) . split sep
    shower = intercalate [sep] . map (optionTypeShow t)
-- | Store an option as one of a set of possible values. The type must be a
-- bounded enumeration, and the type's 'Show' instance will be used to
-- implement the parser.
--
-- This is a simplistic implementation, useful for quick scripts. Users with
-- more complex requirements for enum parsing are encouraged to define their
-- own option types using 'optionType'.
--
-- @
--data Action = Hello | Goodbye
-- deriving (Bounded, Enum, Show)
--
--data MainOptions = MainOptions { optAction :: Action }
--
--instance 'Options' MainOptions where
-- 'defineOptions' = pure MainOptions
-- \<*\> 'defineOption' (optionType_enum \"action\") (\\o -> o
-- { 'optionLongFlags' = [\"action\"]
-- , 'optionDefault' = Hello
-- })
--
--main = 'runCommand' $ \\opts args -> do
-- putStrLn (\"Running action \" ++ show (optAction opts))
-- @
--
-- >$ ./app
-- >Running action Hello
-- >$ ./app --action=Goodbye
-- >Running action Goodbye
optionType_enum :: (Bounded a, Enum a, Show a)
    => String -- ^ Option type name
    -> OptionType a
-- The default value is 'minBound'; parsing matches the raw input against
-- the 'show' rendering of every constructor.
optionType_enum tName = optionType tName minBound parseEnum show where
    -- Lookup table from each constructor's Show rendering to the value.
    values = Map.fromList [(show x, x) | x <- enumFrom minBound]
    -- Rendered set of valid inputs, used in the error message.
    setString = "{" ++ intercalate ", " (map show (Map.keys values)) ++ "}"
    parseEnum s = case Map.lookup s values of
        Nothing -> Left (show s ++ " is not in " ++ setString ++ ".")
        Just x -> Right x
-- | Define a new option with a single long flag, using the default option
-- type for the field's Haskell type (see 'SimpleOptionType').
--
-- Options that need short flags, groups, or custom option types should
-- use 'defineOption' directly.
simpleOption :: SimpleOptionType a
    => String -- ^ Long flag name
    -> a -- ^ Default value
    -> String -- ^ Description, shown in @--help@ output
    -> DefineOptions a
simpleOption flag def desc = defineOption simpleOptionType (\o -> o
    { optionLongFlags = [flag]
    , optionDefault = def
    , optionDescription = desc
    })
-- | Defines a new option in the current options type.
--
-- All options must have one or more /flags/. Options may also have a
-- default value, a description, and a group.
--
-- The /flags/ are how the user specifies an option on the command line. Flags
-- may be /short/ or /long/. See 'optionShortFlags' and 'optionLongFlags' for
-- details.
--
-- @
--'defineOption' 'optionType_word16' (\\o -> o
-- { 'optionLongFlags' = [\"port\"]
-- , 'optionDefault' = 80
-- })
-- @
defineOption :: OptionType a -> (Option a -> Option a) -> DefineOptions a
defineOption t fn = DefineOptions (optionDefault opt) getInfo parser where
    -- Apply the caller's modifier function to a blank option record.
    opt = fn (Option
        { optionShortFlags = []
        , optionLongFlags = []
        , optionDefault = optionTypeDefault t
        , optionDescription = ""
        , optionGroup = Nothing
        , optionLocation = Nothing
        })
    -- Allocate one generated key for this option and describe it for the
    -- tokenizer and the help formatter.
    getInfo key = (key+1, [OptionInfo
        { optionInfoKey = OptionKeyGenerated key
        , optionInfoShortFlags = optionShortFlags opt
        , optionInfoLongFlags = optionLongFlags opt
        , optionInfoDefault = optionTypeShow t (optionDefault opt)
        , optionInfoDescription = optionDescription opt
        , optionInfoGroup = optionGroup opt
        , optionInfoLocation = optionLocation opt
        , optionInfoTypeName = optionTypeName t
        , optionInfoUnary = isJust (optionTypeUnary t)
        , optionInfoUnaryOnly = False
        }])
    -- Convert one token into a parsed value; unary tokens require the
    -- option type to declare a unary value.
    -- parseToken :: Token -> Either String val
    parseToken tok = case tok of
        TokenUnary flagName -> case optionTypeUnary t of
            Nothing -> Left ("The flag " ++ flagName ++ " requires an argument.")
            Just val -> Right val
        Token flagName rawValue -> case optionTypeParse t rawValue of
            Left err -> Left ("Value for flag " ++ flagName ++ " is invalid: " ++ err)
            Right val -> Right val
    -- Look up this option's tokens by its generated key; when absent, fall
    -- back to the configured default. Multiple tokens are only legal for
    -- types that define 'optionTypeMerge'.
    parser key tokens = case Map.lookup (OptionKeyGenerated key) tokens of
        Nothing -> Right (key+1, optionDefault opt)
        Just toks -> case toks of
            -- shouldn't happen, but let's do something graceful anyway.
            [] -> Right (key+1, optionDefault opt)
            [tok] -> case parseToken tok of
                Left err -> Left err
                Right val -> Right (key+1, val)
            _ -> case optionTypeMerge t of
                Nothing -> Left ("Multiple values for flag: " ++ showMultipleFlagValues toks)
                Just appendFn -> case mapEither parseToken toks of
                    Left err -> Left err
                    Right vals -> Right (key+1, appendFn vals)
-- | Render a list of tokens for a \"multiple values\" error message,
-- separated by spaces (e.g. @-v \"--verbose=true\"@).
--
-- 'unwords' replaces the equivalent @intercalate \" \"@ (HLint idiom).
showMultipleFlagValues :: [Token] -> String
showMultipleFlagValues = unwords . map showToken where
    showToken (TokenUnary flagName) = flagName
    showToken (Token flagName rawValue) = show (flagName ++ "=" ++ rawValue)
data Option a = Option
    {
    -- | Short flags are a single character. When entered by a user,
    -- they are preceded by a dash and possibly other short flags.
    --
    -- Short flags must be a letter or a number.
    --
    -- Example: An option with @optionShortFlags = [\'p\']@ may be set using:
    --
    -- >$ ./app -p 443
    -- >$ ./app -p443
      optionShortFlags :: [Char]
    -- | Long flags are multiple characters. When entered by a user, they
    -- are preceded by two dashes.
    --
    -- Long flags may contain letters, numbers, @\'-\'@, and @\'_\'@.
    --
    -- Example: An option with @optionLongFlags = [\"port\"]@ may be set using:
    --
    -- >$ ./app --port 443
    -- >$ ./app --port=443
    , optionLongFlags :: [String]
    -- | Options may have a default value. This will be parsed as if the
    -- user had entered it on the command line.
    , optionDefault :: a
    -- | An option's description is used with the default implementation
    -- of @--help@. It should be a short string describing what the option
    -- does.
    , optionDescription :: String
    -- | Which group the option is in. See the \"Option groups\" section
    -- for details.
    , optionGroup :: Maybe Group
    -- | Source location where the option was defined, if available; used
    -- by flag validation to point \"has no flags\" errors at the
    -- offending definition. NOTE(review): 'defineOption' always leaves
    -- this as Nothing — confirm where (if anywhere) it is populated.
    , optionLocation :: Maybe Location
    }
-- | Validate the collected option definitions: subcommand names must be
-- unique, every option must define at least one flag, and flags must not
-- conflict. On success, returns the definitions with help flags attached.
validateOptionDefs :: [OptionInfo] -> [(String, [OptionInfo])] -> Either String OptionDefinitions
validateOptionDefs cmdInfos subInfos = runIdentity $ runErrorT $ do
    -- All subcommands have unique names.
    let subcmdNames = map fst subInfos
    if Set.size (Set.fromList subcmdNames) /= length subcmdNames
        -- TODO: the error should mention which subcommand names are duplicated
        then throwError "Multiple subcommands exist with the same name."
        else return ()
    -- Each option defines at least one short or long flag.
    let allOptInfos = cmdInfos ++ concat [infos | (_, infos) <- subInfos]
    case mapEither optValidFlags allOptInfos of
        Left err -> throwError err
        Right _ -> return ()
    -- There are no duplicate short or long flags, unless:
    -- The flags are defined in separate subcommands.
    -- The flags have identical OptionInfos (aside from keys)
    cmdDeDupedFlags <- checkNoDuplicateFlags Map.empty cmdInfos
    forM_ subInfos (\subInfo -> checkNoDuplicateFlags cmdDeDupedFlags (snd subInfo))
    -- NOTE(review): callers such as 'parseOptions' apply 'addHelpFlags'
    -- to this result again before tokenizing — confirm 'addHelpFlags' is
    -- idempotent, otherwise help flags are added twice.
    return (addHelpFlags (OptionDefinitions cmdInfos subInfos))
-- | Check that an option defines at least one flag; on failure the error
-- message includes the option's source location when one is recorded.
optValidFlags :: OptionInfo -> Either String ()
optValidFlags info
    | noFlags = Left (case optionInfoLocation info of
        Nothing -> base ++ " has no flags."
        Just loc -> base ++ " at " ++ locationFilename loc ++ ":" ++ show (locationLine loc) ++ " has no flags.")
    -- TODO: All short or long flags have a reasonable name.
    | otherwise = Right ()
    where
    noFlags = null (optionInfoShortFlags info) && null (optionInfoLongFlags info)
    base = "Option with description " ++ show (optionInfoDescription info)
data DeDupFlag = DeDupShort Char | DeDupLong String
deriving (Eq, Ord, Show)
-- | Fold over a list of options, accumulating a map from each flag to the
-- option that claimed it. A flag claimed twice is an error, unless both
-- claims come from 'eqIgnoringKey'-identical options (which lets the same
-- option be shared between subcommands).
checkNoDuplicateFlags :: Map.Map DeDupFlag OptionInfo -> [OptionInfo] -> ErrorT String Identity (Map.Map DeDupFlag OptionInfo)
checkNoDuplicateFlags checked [] = return checked
checkNoDuplicateFlags checked (info:infos) = do
    -- Normalize short and long flags into a single comparable key space.
    let mappedShort = map DeDupShort (optionInfoShortFlags info)
    let mappedLong = map DeDupLong (optionInfoLongFlags info)
    let mappedFlags = mappedShort ++ mappedLong
    forM_ mappedFlags $ \mapKey -> case Map.lookup mapKey checked of
        Nothing -> return ()
        Just prevInfo -> if eqIgnoringKey info prevInfo
            then return ()
            else let
                -- Re-render the flag with its command-line spelling for
                -- the error message.
                flagName = case mapKey of
                    DeDupShort flag -> '-' : flag : []
                    DeDupLong long -> "--" ++ long
                in throwError ("Duplicate option flag " ++ show flagName ++ ".")
    let infoMap = Map.fromList [(f, info) | f <- mappedFlags]
    checkNoDuplicateFlags (Map.union checked infoMap) infos
-- | Structural equality on 'OptionInfo' that disregards the generated
-- key, so the \"same\" option defined in two subcommands compares equal.
eqIgnoringKey :: OptionInfo -> OptionInfo -> Bool
eqIgnoringKey x y = normKey x == normKey y where
    normKey info = info { optionInfoKey = OptionKeyIgnored }
-- | See @'parseOptions'@ and @'parseSubcommand'@.
class Parsed a where
parsedError_ :: a -> Maybe String
parsedHelp_ :: a -> String
-- | See @'parseOptions'@.
data ParsedOptions opts = ParsedOptions (Maybe opts) (Maybe String) String [String]
-- | See @'parseSubcommand'@.
data ParsedSubcommand action = ParsedSubcommand (Maybe action) (Maybe String) String
instance Parsed (ParsedOptions a) where
parsedError_ (ParsedOptions _ x _ _) = x
parsedHelp_ (ParsedOptions _ _ x _) = x
instance Parsed (ParsedSubcommand a) where
parsedError_ (ParsedSubcommand _ x _) = x
parsedHelp_ (ParsedSubcommand _ _ x) = x
-- | Get the options value that was parsed from argv, or @Nothing@ if the
-- arguments could not be converted into options.
--
-- Note: This function returns @Nothing@ if the user provided a help flag. To
-- check whether an error occurred during parsing, check the value of
-- @'parsedError'@.
parsedOptions :: ParsedOptions opts -> Maybe opts
parsedOptions (ParsedOptions x _ _ _) = x
-- | Get command-line arguments remaining after parsing options. The arguments
-- are unchanged from the original argument list, and have not been decoded
-- or otherwise transformed.
parsedArguments :: ParsedOptions opts -> [String]
parsedArguments (ParsedOptions _ _ _ x) = x
-- | Get the subcommand action that was parsed from argv, or @Nothing@ if the
-- arguments could not be converted into a valid action.
--
-- Note: This function returns @Nothing@ if the user provided a help flag. To
-- check whether an error occurred during parsing, check the value of
-- @'parsedError'@.
parsedSubcommand :: ParsedSubcommand action -> Maybe action
parsedSubcommand (ParsedSubcommand x _ _) = x
-- | Get the error that prevented options from being parsed from argv,
-- or @Nothing@ if no error was detected.
parsedError :: Parsed a => a -> Maybe String
parsedError = parsedError_
-- | Get a help message to show the user. If the arguments included
-- a help flag, this will be a message appropriate to that flag.
-- Otherwise, it is a summary (equivalent to @--help@).
--
-- This is always a non-empty string, regardless of whether the parse
-- succeeded or failed. If you need to perform additional validation
-- on the options value, this message can be displayed if validation
-- fails.
parsedHelp :: Parsed a => a -> String
parsedHelp = parsedHelp_
-- | Attempt to convert a list of command-line arguments into an options
-- value. This can be used by application developers who want finer control
-- over error handling, or who want to perform additional validation on the
-- options value.
--
-- The argument list must be in the same encoding as the result of
-- 'System.Environment.getArgs'.
--
-- Use @'parsedOptions'@, @'parsedArguments'@, @'parsedError'@, and
-- @'parsedHelp'@ to inspect the result of @'parseOptions'@.
--
-- Example:
--
-- @
--getOptionsOrDie :: Options a => IO a
--getOptionsOrDie = do
-- argv <- System.Environment.getArgs
-- let parsed = 'parseOptions' argv
-- case 'parsedOptions' parsed of
-- Just opts -> return opts
-- Nothing -> case 'parsedError' parsed of
-- Just err -> do
-- hPutStrLn stderr ('parsedHelp' parsed)
-- hPutStrLn stderr err
-- exitFailure
-- Nothing -> do
-- hPutStr stdout ('parsedHelp' parsed)
-- exitSuccess
-- @
parseOptions :: Options opts => [String] -> ParsedOptions opts
parseOptions argv = parsed where
    -- Collect the option metadata and parser from the type's definition;
    -- both passes start from key 0 so generated keys line up.
    (DefineOptions _ getInfos parser) = defineOptions
    (_, optionInfos) = getInfos 0
    parseTokens = parser 0
    parsed = case validateOptionDefs optionInfos [] of
        -- Definition errors produce no help text (there is nothing valid
        -- to document).
        Left err -> ParsedOptions Nothing (Just err) "" []
        Right optionDefs -> case tokenize (addHelpFlags optionDefs) argv of
            (_, Left err) -> ParsedOptions Nothing (Just err) (helpFor HelpSummary optionDefs Nothing) []
            (_, Right tokens) -> case checkHelpFlag tokens of
                -- A help flag is not an error: no options, no error, just
                -- the requested help text.
                Just helpFlag -> ParsedOptions Nothing Nothing (helpFor helpFlag optionDefs Nothing) []
                Nothing -> case parseTokens (tokensMap tokens) of
                    Left err -> ParsedOptions Nothing (Just err) (helpFor HelpSummary optionDefs Nothing) []
                    Right (_, opts) -> ParsedOptions (Just opts) Nothing (helpFor HelpSummary optionDefs Nothing) (tokensArgv tokens)
-- | Retrieve 'System.Environment.getArgs', and attempt to parse it into a
-- valid value of an 'Options' type plus a list of left-over arguments. The
-- options and arguments are then passed to the provided computation.
--
-- If parsing fails, this computation will print an error and call
-- 'exitFailure'.
--
-- If parsing succeeds, and the user has passed a @--help@ flag, and the
-- developer is using the default help flag definitions, then this computation
-- will print documentation and call 'exitSuccess'.
--
-- See 'runSubcommand' for details on subcommand support.
runCommand :: (MonadIO m, Options opts) => (opts -> [String] -> m a) -> m a
runCommand io = do
    argv <- liftIO System.Environment.getArgs
    let parsed = parseOptions argv
    case parsedOptions parsed of
        Just opts -> io opts (parsedArguments parsed)
        Nothing -> liftIO $ case parsedError parsed of
            -- Parse error: help summary plus the error on stderr, then
            -- a failing exit code.
            Just err -> do
                hPutStrLn stderr (parsedHelp parsed)
                hPutStrLn stderr err
                exitFailure
            -- No options and no error means the user asked for help;
            -- print it to stdout and exit successfully.
            Nothing -> do
                hPutStr stdout (parsedHelp parsed)
                exitSuccess
-- A named subcommand: given the first free option key, yields this
-- subcommand's option infos, a runner, and the next free key.
data Subcommand cmdOpts action = Subcommand String (Integer -> ([OptionInfo], (cmdOpts -> Tokens -> Either String action), Integer))
subcommand :: (Options cmdOpts, Options subcmdOpts)
    => String -- ^ The subcommand name.
    -> (cmdOpts -> subcmdOpts -> [String] -> action) -- ^ The action to run.
    -> Subcommand cmdOpts action
subcommand name fn = Subcommand name (\initialKey -> let
    (DefineOptions _ getInfos parser) = defineOptions
    (nextKey, optionInfos) = getInfos initialKey
    -- Parse from the same starting key used when the infos were
    -- collected, so tokens resolve to the right options.
    parseTokens = parser initialKey
    runAction cmdOpts tokens = case parseTokens (tokensMap tokens) of
        Left err -> Left err
        Right (_, subOpts) -> Right (fn cmdOpts subOpts (tokensArgv tokens))
    in (optionInfos, runAction, nextKey))
-- | Attempt to convert a list of command-line arguments into a subcommand
-- action. This can be used by application developers who want finer control
-- over error handling, or who want subcommands that run in an unusual monad.
--
-- The argument list must be in the same encoding as the result of
-- 'System.Environment.getArgs'.
--
-- Use @'parsedSubcommand'@, @'parsedError'@, and @'parsedHelp'@ to inspect the
-- result of @'parseSubcommand'@.
--
-- Example:
--
-- @
--runSubcommand :: Options cmdOpts => [Subcommand cmdOpts (IO a)] -> IO a
--runSubcommand subcommands = do
-- argv <- System.Environment.getArgs
-- let parsed = 'parseSubcommand' subcommands argv
-- case 'parsedSubcommand' parsed of
-- Just cmd -> cmd
-- Nothing -> case 'parsedError' parsed of
-- Just err -> do
-- hPutStrLn stderr ('parsedHelp' parsed)
-- hPutStrLn stderr err
-- exitFailure
-- Nothing -> do
-- hPutStr stdout ('parsedHelp' parsed)
-- exitSuccess
-- @
--
parseSubcommand :: Options cmdOpts => [Subcommand cmdOpts action] -> [String] -> ParsedSubcommand action
parseSubcommand subcommands argv = parsed where
    -- Global options claim keys [0, cmdNextKey); every subcommand's
    -- options start at cmdNextKey. Subcommand key ranges overlap one
    -- another, which is safe because only one subcommand ever runs.
    (DefineOptions _ getInfos parser) = defineOptions
    (cmdNextKey, cmdInfos) = getInfos 0
    cmdParseTokens = parser 0
    subcmdInfos = do
        Subcommand name fn <- subcommands
        let (infos, _, _) = fn cmdNextKey
        return (name, infos)
    subcmdRunners = Map.fromList $ do
        Subcommand name fn <- subcommands
        let (_, runner, _) = fn cmdNextKey
        return (name, runner)
    parsed = case validateOptionDefs cmdInfos subcmdInfos of
        Left err -> ParsedSubcommand Nothing (Just err) ""
        Right optionDefs -> case tokenize (addHelpFlags optionDefs) argv of
            (subcmd, Left err) -> ParsedSubcommand Nothing (Just err) (helpFor HelpSummary optionDefs subcmd)
            (subcmd, Right tokens) -> case checkHelpFlag tokens of
                Just helpFlag -> ParsedSubcommand Nothing Nothing (helpFor helpFlag optionDefs subcmd)
                Nothing -> case findAction tokens subcmd of
                    Left err -> ParsedSubcommand Nothing (Just err) (helpFor HelpSummary optionDefs subcmd)
                    Right action -> ParsedSubcommand (Just action) Nothing (helpFor HelpSummary optionDefs subcmd)
    -- Parse the global options first, then hand them and the remaining
    -- tokens to the selected subcommand's runner.
    findAction _ Nothing = Left "No subcommand specified"
    findAction tokens (Just subcmdName) = case cmdParseTokens (tokensMap tokens) of
        Left err -> Left err
        Right (_, cmdOpts) -> case Map.lookup subcmdName subcmdRunners of
            Nothing -> Left ("Unknown subcommand " ++ show subcmdName ++ ".")
            Just getRunner -> case getRunner cmdOpts tokens of
                Left err -> Left err
                Right action -> Right action
-- | Used to run applications that are split into subcommands.
--
-- Use 'subcommand' to define available commands and their actions, then pass
-- them to this computation to select one and run it. If the user specifies
-- an invalid subcommand, this computation will print an error and call
-- 'exitFailure'. In handling of invalid flags or @--help@, 'runSubcommand'
-- acts like 'runCommand'.
--
-- @
--import Control.Applicative
--import Control.Monad (unless)
--import Options
--
--data MainOptions = MainOptions { optQuiet :: Bool }
--instance 'Options' MainOptions where
-- 'defineOptions' = pure MainOptions
-- \<*\> 'simpleOption' \"quiet\" False \"Whether to be quiet.\"
--
--data HelloOpts = HelloOpts { optHello :: String }
--instance 'Options' HelloOpts where
-- 'defineOptions' = pure HelloOpts
-- \<*\> 'simpleOption' \"hello\" \"Hello!\" \"How to say hello.\"
--
--data ByeOpts = ByeOpts { optName :: String }
--instance 'Options' ByeOpts where
-- 'defineOptions' = pure ByeOpts
-- \<*\> 'simpleOption' \"name\" \"\" \"The user's name.\"
--
--hello :: MainOptions -> HelloOpts -> [String] -> IO ()
--hello mainOpts opts args = unless (optQuiet mainOpts) $ do
-- putStrLn (optHello opts)
--
--bye :: MainOptions -> ByeOpts -> [String] -> IO ()
--bye mainOpts opts args = unless (optQuiet mainOpts) $ do
-- putStrLn (\"Good bye \" ++ optName opts)
--
--main :: IO ()
--main = 'runSubcommand'
-- [ 'subcommand' \"hello\" hello
-- , 'subcommand' \"bye\" bye
-- ]
-- @
--
-- >$ ./app hello
-- >Hello!
-- >$ ./app hello --hello='Allo!'
-- >Allo!
-- >$ ./app bye
-- >Good bye
-- >$ ./app bye --name='Alice'
-- >Good bye Alice
-- | Fetch @argv@, parse it against the given subcommands, and either run
-- the selected action or report errors/help on the appropriate handle
-- before exiting.
runSubcommand :: (Options opts, MonadIO m) => [Subcommand opts (m a)] -> m a
runSubcommand subcommands = do
    args <- liftIO System.Environment.getArgs
    let result = parseSubcommand subcommands args
    case parsedSubcommand result of
        Just action -> action
        Nothing -> liftIO $
            case parsedError result of
                Just err -> do
                    -- Errors go to stderr, help text first, then exit non-zero.
                    hPutStrLn stderr (parsedHelp result)
                    hPutStrLn stderr err
                    exitFailure
                Nothing -> do
                    -- --help was requested: print help to stdout and exit zero.
                    hPutStr stdout (parsedHelp result)
                    exitSuccess
| jmillikin/haskell-options | lib/Options.hs | mit | 37,706 | 135 | 21 | 7,539 | 7,360 | 3,982 | 3,378 | 504 | 10 |
{-# LANGUAGE DeriveDataTypeable #-}
module InnerEar.Types.GScore where
import Text.JSON
import Text.JSON.Generic
import Data.Map
-- | A grade: points earned ('score') out of a possible total ('outOf').
-- Both components are 'Double's; nothing here enforces @score <= outOf@
-- or a non-zero total.
data GScore = GScore {
  score :: Double,
  outOf :: Double
} deriving (Eq, Typeable, Data)
-- Superseded draft, kept for reference — note it returned a fraction
-- in [0,1], not a percentage:
-- asPercent' :: GScore -> Double
-- asPercent' (GScore 0 b) = 0.0
-- asPercent' (GScore a b) = a/b
-- | Convert a score to a percentage (0–100).
--
-- Guards the /divisor/ rather than the dividend: a total ('outOf') of
-- zero yields 0.0 instead of the IEEE-754 Infinity (or NaN for 0/0)
-- that plain division would produce. A zero score over a non-zero
-- total still computes to 0.0 through the general clause, so all
-- previously finite results are unchanged.
asPercent :: GScore -> Double
asPercent (GScore _ 0) = 0.0
asPercent (GScore a b) = (a / b) * 100.0
instance Show GScore where
  -- Renders e.g. @GScore 3.0 4.0@ as \"3/4\". The explicit 'Integer'
  -- annotations pin down 'round'\'s result type instead of relying on
  -- type defaulting (silences -Wtype-defaults; rendered output is
  -- unchanged, since defaulting picked 'Integer' anyway).
  show (GScore a b) = show (round a :: Integer) ++ "/" ++ show (round b :: Integer)
| JamieBeverley/InnerEar | src/InnerEar/Types/GScore.hs | gpl-3.0 | 533 | 0 | 10 | 101 | 174 | 97 | 77 | 14 | 1 |
#!/usr/bin/env stack
{- stack runghc --verbosity info --package pandoc-types -}
-- Ensure level 1 and 2 headings are first-letter-capitalised.
import Data.Char
import Text.Pandoc.JSON
import Text.Pandoc.Walk
-- | Run as a pandoc JSON filter: read the document AST from stdin, apply
-- 'capitalizeHeaders' to every 'Block', and write the result to stdout.
main :: IO ()
main = toJSONFilter capitalizeHeaders
-- | First-letter-capitalise level-1 and level-2 headings by rewriting
-- only the heading's first inline; every other block passes through
-- untouched.
capitalizeHeaders :: Block -> Block
capitalizeHeaders blk = case blk of
    Header lvl attr inlines
        | lvl < 3 ->
            let (firstInline, rest) = splitAt 1 inlines
            in Header lvl attr (map capitalize firstInline ++ rest)
    _ -> blk
-- | Capitalise a string inline: upper-case the first character and
-- lower-case the remainder. Empty strings and non-'Str' inlines are
-- returned unchanged.
capitalize :: Inline -> Inline
capitalize inline = case inline of
    Str []       -> Str []
    Str (c : cs) -> Str (toUpper c : map toLower cs)
    other        -> other
{-
capitalizeHeaderLinks :: Inline -> Inline
capitalizeHeaderLinks (Link xs t@('#':_,_)) = Link (walk capitalize xs) t
capitalizeHeaderLinks x = x
-}
| ony/hledger | tools/pandoc-capitalize-headers.hs | gpl-3.0 | 708 | 0 | 10 | 117 | 181 | 92 | 89 | 11 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE ViewPatterns #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Module : Network.AWS.DynamoDB.Types
-- Copyright : (c) 2013-2014 Brendan Hay <brendan.g.hay@gmail.com>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
module Network.AWS.DynamoDB.Types
(
-- * Service
DynamoDB
-- ** Error
, JSONError
-- * WriteRequest
, WriteRequest
, writeRequest
, wDeleteRequest
, wPutRequest
-- * ProvisionedThroughputDescription
, ProvisionedThroughputDescription
, provisionedThroughputDescription
, ptdLastDecreaseDateTime
, ptdLastIncreaseDateTime
, ptdNumberOfDecreasesToday
, ptdReadCapacityUnits
, ptdWriteCapacityUnits
-- * KeyType
, KeyType (..)
-- * AttributeValue
, AttributeValue
, attributeValue
, avB
, avBOOL
, avBS
, avL
, avM
, avN
, avNS
, avNULL
, avS
, avSS
-- * IndexStatus
, IndexStatus (..)
-- * ProvisionedThroughput
, ProvisionedThroughput
, provisionedThroughput
, ptReadCapacityUnits
, ptWriteCapacityUnits
-- * TableStatus
, TableStatus (..)
-- * ProjectionType
, ProjectionType (..)
-- * TableDescription
, TableDescription
, tableDescription
, tdAttributeDefinitions
, tdCreationDateTime
, tdGlobalSecondaryIndexes
, tdItemCount
, tdKeySchema
, tdLocalSecondaryIndexes
, tdProvisionedThroughput
, tdTableName
, tdTableSizeBytes
, tdTableStatus
-- * KeysAndAttributes
, KeysAndAttributes
, keysAndAttributes
, kaaAttributesToGet
, kaaConsistentRead
, kaaExpressionAttributeNames
, kaaKeys
, kaaProjectionExpression
-- * ReturnConsumedCapacity
, ReturnConsumedCapacity (..)
-- * ReturnItemCollectionMetrics
, ReturnItemCollectionMetrics (..)
-- * AttributeValueUpdate
, AttributeValueUpdate
, attributeValueUpdate
, avuAction
, avuValue
-- * ExpectedAttributeValue
, ExpectedAttributeValue
, expectedAttributeValue
, eavAttributeValueList
, eavComparisonOperator
, eavExists
, eavValue
-- * AttributeDefinition
, AttributeDefinition
, attributeDefinition
, adAttributeName
, adAttributeType
-- * ComparisonOperator
, ComparisonOperator (..)
-- * ReturnValue
, ReturnValue (..)
-- * LocalSecondaryIndex
, LocalSecondaryIndex
, localSecondaryIndex
, lsiIndexName
, lsiKeySchema
, lsiProjection
-- * GlobalSecondaryIndexDescription
, GlobalSecondaryIndexDescription
, globalSecondaryIndexDescription
, gsidBackfilling
, gsidIndexName
, gsidIndexSizeBytes
, gsidIndexStatus
, gsidItemCount
, gsidKeySchema
, gsidProjection
, gsidProvisionedThroughput
-- * ItemCollectionMetrics
, ItemCollectionMetrics
, itemCollectionMetrics
, icmItemCollectionKey
, icmSizeEstimateRangeGB
-- * Capacity
, Capacity
, capacity
, cCapacityUnits
-- * ConsumedCapacity
, ConsumedCapacity
, consumedCapacity
, ccCapacityUnits
, ccGlobalSecondaryIndexes
, ccLocalSecondaryIndexes
, ccTable
, ccTableName
-- * GlobalSecondaryIndex
, GlobalSecondaryIndex
, globalSecondaryIndex
, gsiIndexName
, gsiKeySchema
, gsiProjection
, gsiProvisionedThroughput
-- * LocalSecondaryIndexDescription
, LocalSecondaryIndexDescription
, localSecondaryIndexDescription
, lsidIndexName
, lsidIndexSizeBytes
, lsidItemCount
, lsidKeySchema
, lsidProjection
-- * AttributeAction
, AttributeAction (..)
-- * ScalarAttributeType
, ScalarAttributeType (..)
-- * Projection
, Projection
, projection
, pNonKeyAttributes
, pProjectionType
-- * CreateGlobalSecondaryIndexAction
, CreateGlobalSecondaryIndexAction
, createGlobalSecondaryIndexAction
, cgsiaIndexName
, cgsiaKeySchema
, cgsiaProjection
, cgsiaProvisionedThroughput
-- * Select
, Select (..)
-- * KeySchemaElement
, KeySchemaElement
, keySchemaElement
, kseAttributeName
, kseKeyType
-- * DeleteGlobalSecondaryIndexAction
, DeleteGlobalSecondaryIndexAction
, deleteGlobalSecondaryIndexAction
, dgsiaIndexName
-- * DeleteRequest
, DeleteRequest
, deleteRequest
, dKey
-- * UpdateGlobalSecondaryIndexAction
, UpdateGlobalSecondaryIndexAction
, updateGlobalSecondaryIndexAction
, ugsiaIndexName
, ugsiaProvisionedThroughput
-- * PutRequest
, PutRequest
, putRequest
, pItem
-- * Condition
, Condition
, condition
, cAttributeValueList
, cComparisonOperator
-- * ConditionalOperator
, ConditionalOperator (..)
-- * GlobalSecondaryIndexUpdate
, GlobalSecondaryIndexUpdate
, globalSecondaryIndexUpdate
, gsiuCreate
, gsiuDelete
, gsiuUpdate
) where
import Network.AWS.Prelude
import Network.AWS.Signing
import qualified GHC.Exts
-- | Version @2012-08-10@ of the Amazon DynamoDB service.
-- | Phantom type identifying the DynamoDB service; it carries no runtime
-- data and exists only to select type-class instances.
data DynamoDB

instance AWSService DynamoDB where
    -- Requests are signed with AWS Signature Version 4; errors arrive as
    -- JSON error documents.
    type Sg DynamoDB = V4
    type Er DynamoDB = JSONError

    service = service'
      where
        -- Static service metadata used for endpoint construction and the
        -- X-Amz-Target header.
        service' :: Service DynamoDB
        service' = Service
            { _svcAbbrev       = "DynamoDB"
            , _svcPrefix       = "dynamodb"
            , _svcVersion      = "2012-08-10"
            , _svcTargetPrefix = Just "DynamoDB_20120810"
            , _svcJSONVersion  = Just "1.0"
            , _svcHandle       = handle
            , _svcRetry        = retry
            }

        handle :: Status
               -> Maybe (LazyByteString -> ServiceError JSONError)
        handle = jsonError statusSuccess service'

        -- Exponential backoff: 0.05s base, doubling, at most 10 attempts.
        retry :: Retry DynamoDB
        retry = Exponential
            { _retryBase     = 0.05
            , _retryGrowth   = 2
            , _retryAttempts = 10
            , _retryCheck    = check
            }

        -- Decide whether a failed call is retryable from the HTTP status
        -- and service error code.
        check :: Status
              -> JSONError
              -> Bool
        check (statusCode -> s) (awsErrorCode -> e)
            | s == 400 && "ThrottlingException" == e = True -- Throttling
            | s == 400 && "ProvisionedThroughputExceededException" == e = True -- Throughput Exceeded
            | s == 500  = True -- General Server Error
            | s == 509  = True -- Limit Exceeded (NOTE(review): 509 is a non-standard HTTP status — confirm intended)
            | s == 503  = True -- Service Unavailable
            | otherwise = False
-- | A single item-write operation: either a delete or a put. The types do
-- not enforce that exactly one field is set; both default to 'Nothing' in
-- the 'writeRequest' constructor.
data WriteRequest = WriteRequest
    { _wDeleteRequest :: Maybe DeleteRequest
    , _wPutRequest    :: Maybe PutRequest
    } deriving (Eq, Read, Show)
-- | 'WriteRequest' constructor. Both operations start out unset:
--
-- * 'wDeleteRequest' @::@ 'Maybe' 'DeleteRequest'
--
-- * 'wPutRequest' @::@ 'Maybe' 'PutRequest'
--
writeRequest :: WriteRequest
writeRequest = WriteRequest
    { _wDeleteRequest = Nothing
    , _wPutRequest    = Nothing
    }
-- | A request to perform a /DeleteItem/ operation.
wDeleteRequest :: Lens' WriteRequest (Maybe DeleteRequest)
wDeleteRequest = lens _wDeleteRequest setter
  where
    setter wr v = wr { _wDeleteRequest = v }

-- | A request to perform a /PutItem/ operation.
wPutRequest :: Lens' WriteRequest (Maybe PutRequest)
wPutRequest = lens _wPutRequest setter
  where
    setter wr v = wr { _wPutRequest = v }
instance FromJSON WriteRequest where
    -- Both keys are optional; absent keys parse to 'Nothing'.
    parseJSON = withObject "WriteRequest" $ \o -> do
        deleteReq <- o .:? "DeleteRequest"
        putReq    <- o .:? "PutRequest"
        return (WriteRequest deleteReq putReq)

instance ToJSON WriteRequest where
    toJSON (WriteRequest deleteReq putReq) = object
        [ "PutRequest"    .= putReq
        , "DeleteRequest" .= deleteReq
        ]
-- | Read/write capacity settings as reported by DynamoDB, together with
-- bookkeeping about throughput increases and decreases. Every field is
-- optional in service responses.
data ProvisionedThroughputDescription = ProvisionedThroughputDescription
    { _ptdLastDecreaseDateTime   :: Maybe POSIX
    , _ptdLastIncreaseDateTime   :: Maybe POSIX
    , _ptdNumberOfDecreasesToday :: Maybe Nat
    , _ptdReadCapacityUnits      :: Maybe Nat
    , _ptdWriteCapacityUnits     :: Maybe Nat
    } deriving (Eq, Ord, Read, Show)
-- | 'ProvisionedThroughputDescription' constructor. Every field starts
-- out as 'Nothing':
--
-- * 'ptdLastDecreaseDateTime', 'ptdLastIncreaseDateTime' @::@ 'Maybe' 'UTCTime'
--
-- * 'ptdNumberOfDecreasesToday', 'ptdReadCapacityUnits',
--   'ptdWriteCapacityUnits' @::@ 'Maybe' 'Natural'
--
provisionedThroughputDescription :: ProvisionedThroughputDescription
provisionedThroughputDescription = ProvisionedThroughputDescription
    { _ptdLastDecreaseDateTime   = Nothing
    , _ptdLastIncreaseDateTime   = Nothing
    , _ptdNumberOfDecreasesToday = Nothing
    , _ptdReadCapacityUnits      = Nothing
    , _ptdWriteCapacityUnits     = Nothing
    }
-- | The date and time of the last provisioned throughput decrease for this table.
ptdLastDecreaseDateTime :: Lens' ProvisionedThroughputDescription (Maybe UTCTime)
ptdLastDecreaseDateTime = fieldLens . mapping _Time
  where
    fieldLens = lens _ptdLastDecreaseDateTime
        (\rec v -> rec { _ptdLastDecreaseDateTime = v })

-- | The date and time of the last provisioned throughput increase for this table.
ptdLastIncreaseDateTime :: Lens' ProvisionedThroughputDescription (Maybe UTCTime)
ptdLastIncreaseDateTime = fieldLens . mapping _Time
  where
    fieldLens = lens _ptdLastIncreaseDateTime
        (\rec v -> rec { _ptdLastIncreaseDateTime = v })

-- | The number of provisioned throughput decreases for this table during
-- the current UTC calendar day. See the /Limits/ page of the Amazon
-- DynamoDB Developer Guide for current maximums.
ptdNumberOfDecreasesToday :: Lens' ProvisionedThroughputDescription (Maybe Natural)
ptdNumberOfDecreasesToday = fieldLens . mapping _Nat
  where
    fieldLens = lens _ptdNumberOfDecreasesToday
        (\rec v -> rec { _ptdNumberOfDecreasesToday = v })

-- | The maximum number of strongly consistent reads consumed per second
-- before DynamoDB returns a /ThrottlingException/.
ptdReadCapacityUnits :: Lens' ProvisionedThroughputDescription (Maybe Natural)
ptdReadCapacityUnits = fieldLens . mapping _Nat
  where
    fieldLens = lens _ptdReadCapacityUnits
        (\rec v -> rec { _ptdReadCapacityUnits = v })

-- | The maximum number of writes consumed per second before DynamoDB
-- returns a /ThrottlingException/.
ptdWriteCapacityUnits :: Lens' ProvisionedThroughputDescription (Maybe Natural)
ptdWriteCapacityUnits = fieldLens . mapping _Nat
  where
    fieldLens = lens _ptdWriteCapacityUnits
        (\rec v -> rec { _ptdWriteCapacityUnits = v })
instance FromJSON ProvisionedThroughputDescription where
    -- NOTE: positional applicative parse — the chain order below must
    -- match the declaration order of the record's fields.
    parseJSON = withObject "ProvisionedThroughputDescription" $ \o -> ProvisionedThroughputDescription
        <$> o .:? "LastDecreaseDateTime"
        <*> o .:? "LastIncreaseDateTime"
        <*> o .:? "NumberOfDecreasesToday"
        <*> o .:? "ReadCapacityUnits"
        <*> o .:? "WriteCapacityUnits"

instance ToJSON ProvisionedThroughputDescription where
    -- Every field is serialised under its wire name (NOTE(review):
    -- 'Nothing' values appear to serialise as JSON null here — confirm).
    toJSON ProvisionedThroughputDescription{..} = object
        [ "LastIncreaseDateTime"   .= _ptdLastIncreaseDateTime
        , "LastDecreaseDateTime"   .= _ptdLastDecreaseDateTime
        , "NumberOfDecreasesToday" .= _ptdNumberOfDecreasesToday
        , "ReadCapacityUnits"      .= _ptdReadCapacityUnits
        , "WriteCapacityUnits"     .= _ptdWriteCapacityUnits
        ]
-- | Whether a key-schema attribute is the hash key or the range key.
data KeyType
    = Hash  -- ^ HASH
    | Range -- ^ RANGE
    deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable KeyType

instance FromText KeyType where
    -- Parsing is case-insensitive: input is lower-cased before matching.
    parser = takeLowerText >>= \txt -> case txt of
        "hash"  -> pure Hash
        "range" -> pure Range
        _       -> fail ("Failure parsing KeyType from " ++ show txt)

instance ToText KeyType where
    toText Hash  = "HASH"
    toText Range = "RANGE"

instance ToByteString KeyType
instance ToHeader KeyType
instance ToQuery KeyType

instance FromJSON KeyType where
    parseJSON = parseJSONText "KeyType"

instance ToJSON KeyType where
    toJSON = toJSONText
-- | A DynamoDB attribute value. One variant field per service data type;
-- all start empty in the 'attributeValue' constructor and only the variant
-- that is set is meaningful.
data AttributeValue = AttributeValue
    { _avB    :: Maybe Base64            -- ^ Binary
    , _avBOOL :: Maybe Bool              -- ^ Boolean
    , _avBS   :: List "BS" Base64        -- ^ Binary set
    , _avL    :: List "L" AttributeValue -- ^ List (recursive)
    , _avM    :: Map Text AttributeValue -- ^ Map (recursive)
    , _avN    :: Maybe Text              -- ^ Number, carried as text
    , _avNS   :: List "NS" Text          -- ^ Number set
    , _avNULL :: Maybe Bool              -- ^ Null marker
    , _avS    :: Maybe Text              -- ^ String
    , _avSS   :: List "SS" Text          -- ^ String set
    } deriving (Eq, Read, Show)
-- | 'AttributeValue' constructor. Every variant starts out empty
-- ('Nothing' for scalars, 'mempty' for the set/map/list variants); set
-- exactly the one you need through the lenses below.
--
-- * 'avB', 'avBOOL', 'avN', 'avNULL', 'avS' @::@ 'Maybe' values
--
-- * 'avBS', 'avL', 'avNS', 'avSS' @::@ lists; 'avM' @::@ 'HashMap'
--
attributeValue :: AttributeValue
attributeValue = AttributeValue
    { _avB    = Nothing
    , _avBOOL = Nothing
    , _avBS   = mempty
    , _avL    = mempty
    , _avM    = mempty
    , _avN    = Nothing
    , _avNS   = mempty
    , _avNULL = Nothing
    , _avS    = Nothing
    , _avSS   = mempty
    }
-- | Lens for the binary (@B@) variant.
avB :: Lens' AttributeValue (Maybe Base64)
avB = lens _avB (\av v -> av { _avB = v })

-- | Lens for the boolean (@BOOL@) variant.
avBOOL :: Lens' AttributeValue (Maybe Bool)
avBOOL = lens _avBOOL (\av v -> av { _avBOOL = v })

-- | Lens for the binary-set (@BS@) variant.
avBS :: Lens' AttributeValue [Base64]
avBS = lens _avBS (\av v -> av { _avBS = v }) . _List

-- | Lens for the list (@L@) variant.
avL :: Lens' AttributeValue [AttributeValue]
avL = lens _avL (\av v -> av { _avL = v }) . _List

-- | Lens for the map (@M@) variant.
avM :: Lens' AttributeValue (HashMap Text AttributeValue)
avM = lens _avM (\av v -> av { _avM = v }) . _Map

-- | Lens for the number (@N@) variant, carried as text.
avN :: Lens' AttributeValue (Maybe Text)
avN = lens _avN (\av v -> av { _avN = v })

-- | Lens for the number-set (@NS@) variant.
avNS :: Lens' AttributeValue [Text]
avNS = lens _avNS (\av v -> av { _avNS = v }) . _List

-- | Lens for the null (@NULL@) variant.
avNULL :: Lens' AttributeValue (Maybe Bool)
avNULL = lens _avNULL (\av v -> av { _avNULL = v })

-- | Lens for the string (@S@) variant.
avS :: Lens' AttributeValue (Maybe Text)
avS = lens _avS (\av v -> av { _avS = v })

-- | Lens for the string-set (@SS@) variant.
avSS :: Lens' AttributeValue [Text]
avSS = lens _avSS (\av v -> av { _avSS = v }) . _List
instance FromJSON AttributeValue where
    -- NOTE: positional applicative parse — the chain order must match the
    -- declaration order of 'AttributeValue'. Set/map/list variants default
    -- to 'mempty' when their key is absent.
    parseJSON = withObject "AttributeValue" $ \o -> AttributeValue
        <$> o .:? "B"
        <*> o .:? "BOOL"
        <*> o .:? "BS" .!= mempty
        <*> o .:? "L" .!= mempty
        <*> o .:? "M" .!= mempty
        <*> o .:? "N"
        <*> o .:? "NS" .!= mempty
        <*> o .:? "NULL"
        <*> o .:? "S"
        <*> o .:? "SS" .!= mempty

instance ToJSON AttributeValue where
    -- Serialises every variant field under its single-letter service tag.
    toJSON AttributeValue{..} = object
        [ "S"    .= _avS
        , "N"    .= _avN
        , "B"    .= _avB
        , "SS"   .= _avSS
        , "NS"   .= _avNS
        , "BS"   .= _avBS
        , "M"    .= _avM
        , "L"    .= _avL
        , "NULL" .= _avNULL
        , "BOOL" .= _avBOOL
        ]
-- | The lifecycle state of a secondary index.
data IndexStatus
    = Active   -- ^ ACTIVE
    | Creating -- ^ CREATING
    | Deleting -- ^ DELETING
    | Updating -- ^ UPDATING
    deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable IndexStatus

instance FromText IndexStatus where
    -- Parsing is case-insensitive: input is lower-cased before matching.
    parser = takeLowerText >>= \txt -> case txt of
        "active"   -> pure Active
        "creating" -> pure Creating
        "deleting" -> pure Deleting
        "updating" -> pure Updating
        _          -> fail ("Failure parsing IndexStatus from " ++ show txt)

instance ToText IndexStatus where
    toText Active   = "ACTIVE"
    toText Creating = "CREATING"
    toText Deleting = "DELETING"
    toText Updating = "UPDATING"

instance ToByteString IndexStatus
instance ToHeader IndexStatus
instance ToQuery IndexStatus

instance FromJSON IndexStatus where
    parseJSON = parseJSONText "IndexStatus"

instance ToJSON IndexStatus where
    toJSON = toJSONText
-- | Requested read/write capacity units for a table or index. Unlike the
-- description type above, both fields are required.
data ProvisionedThroughput = ProvisionedThroughput
    { _ptReadCapacityUnits  :: Nat
    , _ptWriteCapacityUnits :: Nat
    } deriving (Eq, Ord, Read, Show)
-- | 'ProvisionedThroughput' constructor. Both capacities are required:
--
-- * 'ptReadCapacityUnits' @::@ 'Natural'
--
-- * 'ptWriteCapacityUnits' @::@ 'Natural'
--
provisionedThroughput :: Natural -- ^ 'ptReadCapacityUnits'
                      -> Natural -- ^ 'ptWriteCapacityUnits'
                      -> ProvisionedThroughput
provisionedThroughput readUnits writeUnits = ProvisionedThroughput
    { _ptReadCapacityUnits  = toNat readUnits
    , _ptWriteCapacityUnits = toNat writeUnits
    }
  where
    -- Wrap a 'Natural' into the internal 'Nat' newtype via its iso.
    toNat = withIso _Nat (const id)
-- | The maximum number of strongly consistent reads consumed per second
-- before DynamoDB returns a /ThrottlingException/. See
-- <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput Specifying Read and Write Requirements>
-- in the /Amazon DynamoDB Developer Guide/.
ptReadCapacityUnits :: Lens' ProvisionedThroughput Natural
ptReadCapacityUnits = fieldLens . _Nat
  where
    fieldLens = lens _ptReadCapacityUnits
        (\rec v -> rec { _ptReadCapacityUnits = v })

-- | The maximum number of writes consumed per second before DynamoDB
-- returns a /ThrottlingException/. See
-- <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput Specifying Read and Write Requirements>
-- in the /Amazon DynamoDB Developer Guide/.
ptWriteCapacityUnits :: Lens' ProvisionedThroughput Natural
ptWriteCapacityUnits = fieldLens . _Nat
  where
    fieldLens = lens _ptWriteCapacityUnits
        (\rec v -> rec { _ptWriteCapacityUnits = v })
instance FromJSON ProvisionedThroughput where
    -- Both keys are mandatory; parsing fails if either is missing.
    parseJSON = withObject "ProvisionedThroughput" $ \o -> do
        readUnits  <- o .: "ReadCapacityUnits"
        writeUnits <- o .: "WriteCapacityUnits"
        return (ProvisionedThroughput readUnits writeUnits)

instance ToJSON ProvisionedThroughput where
    toJSON (ProvisionedThroughput readUnits writeUnits) = object
        [ "ReadCapacityUnits"  .= readUnits
        , "WriteCapacityUnits" .= writeUnits
        ]
-- | The lifecycle state of a table (constructors are @TS@-prefixed to
-- avoid clashing with 'IndexStatus').
data TableStatus
    = TSActive   -- ^ ACTIVE
    | TSCreating -- ^ CREATING
    | TSDeleting -- ^ DELETING
    | TSUpdating -- ^ UPDATING
    deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable TableStatus

instance FromText TableStatus where
    -- Parsing is case-insensitive: input is lower-cased before matching.
    parser = takeLowerText >>= \txt -> case txt of
        "active"   -> pure TSActive
        "creating" -> pure TSCreating
        "deleting" -> pure TSDeleting
        "updating" -> pure TSUpdating
        _          -> fail ("Failure parsing TableStatus from " ++ show txt)

instance ToText TableStatus where
    toText TSActive   = "ACTIVE"
    toText TSCreating = "CREATING"
    toText TSDeleting = "DELETING"
    toText TSUpdating = "UPDATING"

instance ToByteString TableStatus
instance ToHeader TableStatus
instance ToQuery TableStatus

instance FromJSON TableStatus where
    parseJSON = parseJSONText "TableStatus"

instance ToJSON TableStatus where
    toJSON = toJSONText
-- | Which table attributes are copied (projected) into an index.
data ProjectionType
    = All      -- ^ ALL
    | Include  -- ^ INCLUDE
    | KeysOnly -- ^ KEYS_ONLY
    deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable ProjectionType

instance FromText ProjectionType where
    -- Parsing is case-insensitive: input is lower-cased before matching.
    parser = takeLowerText >>= \txt -> case txt of
        "all"       -> pure All
        "include"   -> pure Include
        "keys_only" -> pure KeysOnly
        _           -> fail ("Failure parsing ProjectionType from " ++ show txt)

instance ToText ProjectionType where
    toText All      = "ALL"
    toText Include  = "INCLUDE"
    toText KeysOnly = "KEYS_ONLY"

instance ToByteString ProjectionType
instance ToHeader ProjectionType
instance ToQuery ProjectionType

instance FromJSON ProjectionType where
    parseJSON = parseJSONText "ProjectionType"

instance ToJSON ProjectionType where
    toJSON = toJSONText
-- | A full description of a table as reported by the service: key schema,
-- index descriptions, throughput settings, status, and size/count
-- statistics.
data TableDescription = TableDescription
    { _tdAttributeDefinitions   :: List "AttributeDefinitions" AttributeDefinition
    , _tdCreationDateTime       :: POSIX
    , _tdGlobalSecondaryIndexes :: List "GlobalSecondaryIndexes" GlobalSecondaryIndexDescription
    , _tdItemCount              :: Integer
    , _tdKeySchema              :: List1 "KeySchema" KeySchemaElement
    , _tdLocalSecondaryIndexes  :: List "LocalSecondaryIndexes" LocalSecondaryIndexDescription
    , _tdProvisionedThroughput  :: ProvisionedThroughputDescription
    , _tdTableName              :: Text
    , _tdTableSizeBytes         :: Integer
    , _tdTableStatus            :: TableStatus
    } deriving (Eq, Read, Show)
-- | 'TableDescription' constructor. The attribute-definition and
-- secondary-index collections start out empty; populate them through
-- 'tdAttributeDefinitions', 'tdLocalSecondaryIndexes' and
-- 'tdGlobalSecondaryIndexes'.
tableDescription :: Text -- ^ 'tdTableName'
                 -> NonEmpty KeySchemaElement -- ^ 'tdKeySchema'
                 -> TableStatus -- ^ 'tdTableStatus'
                 -> UTCTime -- ^ 'tdCreationDateTime'
                 -> ProvisionedThroughputDescription -- ^ 'tdProvisionedThroughput'
                 -> Integer -- ^ 'tdTableSizeBytes'
                 -> Integer -- ^ 'tdItemCount'
                 -> TableDescription
tableDescription tname keySchema status created throughput sizeBytes itemCount = TableDescription
    { _tdTableName              = tname
    , _tdKeySchema              = withIso _List1 (const id) keySchema
    , _tdTableStatus            = status
    , _tdCreationDateTime       = withIso _Time (const id) created
    , _tdProvisionedThroughput  = throughput
    , _tdTableSizeBytes         = sizeBytes
    , _tdItemCount              = itemCount
    , _tdAttributeDefinitions   = mempty
    , _tdLocalSecondaryIndexes  = mempty
    , _tdGlobalSecondaryIndexes = mempty
    }
-- | The attribute definitions for the table and index key schema: each
-- /AttributeDefinition/ pairs an attribute name with its data type.
tdAttributeDefinitions :: Lens' TableDescription [AttributeDefinition]
tdAttributeDefinitions = fieldLens . _List
  where
    fieldLens = lens _tdAttributeDefinitions
        (\rec v -> rec { _tdAttributeDefinitions = v })

-- | The date and time when the table was created, in
-- <http://www.epochconverter.com/ UNIX epoch time> format.
tdCreationDateTime :: Lens' TableDescription UTCTime
tdCreationDateTime = fieldLens . _Time
  where
    fieldLens = lens _tdCreationDateTime
        (\rec v -> rec { _tdCreationDateTime = v })

-- | The global secondary indexes on the table, if any. Each description
-- carries the index name, key schema, projection, status, provisioned
-- throughput, backfilling flag, and size/count statistics; see the AWS
-- /DescribeTable/ documentation for the meaning of each element. Empty
-- when the table is in the 'DELETING' state.
tdGlobalSecondaryIndexes :: Lens' TableDescription [GlobalSecondaryIndexDescription]
tdGlobalSecondaryIndexes = fieldLens . _List
  where
    fieldLens = lens _tdGlobalSecondaryIndexes
        (\rec v -> rec { _tdGlobalSecondaryIndexes = v })

-- | The number of items in the table. DynamoDB refreshes this value
-- approximately every six hours, so recent changes may not be reflected.
tdItemCount :: Lens' TableDescription Integer
tdItemCount = lens _tdItemCount (\rec v -> rec { _tdItemCount = v })

-- | The primary key structure for the table: each /KeySchemaElement/
-- names an attribute and gives its key type ('HASH' or 'RANGE'). See
-- <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey Primary Key>
-- in the /Amazon DynamoDB Developer Guide/.
tdKeySchema :: Lens' TableDescription (NonEmpty KeySchemaElement)
tdKeySchema = fieldLens . _List1
  where
    fieldLens = lens _tdKeySchema (\rec v -> rec { _tdKeySchema = v })

-- | The local secondary indexes on the table, if any. Each description
-- carries the index name, key schema, projection, and size/count
-- statistics; see the AWS /DescribeTable/ documentation for details.
-- Empty when the table is in the 'DELETING' state.
tdLocalSecondaryIndexes :: Lens' TableDescription [LocalSecondaryIndexDescription]
tdLocalSecondaryIndexes = fieldLens . _List
  where
    fieldLens = lens _tdLocalSecondaryIndexes
        (\rec v -> rec { _tdLocalSecondaryIndexes = v })

-- | The provisioned throughput settings for the table, consisting of read
-- and write capacity units, along with data about increases and decreases.
tdProvisionedThroughput :: Lens' TableDescription ProvisionedThroughputDescription
tdProvisionedThroughput =
    lens _tdProvisionedThroughput
        (\rec v -> rec { _tdProvisionedThroughput = v })

-- | The name of the table.
tdTableName :: Lens' TableDescription Text
tdTableName = lens _tdTableName (\rec v -> rec { _tdTableName = v })

-- | The total size of the table in bytes. DynamoDB refreshes this value
-- approximately every six hours, so recent changes may not be reflected.
tdTableSizeBytes :: Lens' TableDescription Integer
tdTableSizeBytes =
    lens _tdTableSizeBytes (\rec v -> rec { _tdTableSizeBytes = v })

-- | The current state of the table: /CREATING/, /UPDATING/, /DELETING/ or
-- /ACTIVE/.
tdTableStatus :: Lens' TableDescription TableStatus
tdTableStatus = lens _tdTableStatus (\rec v -> rec { _tdTableStatus = v })
-- Field order below must match the 'TableDescription' constructor.
instance FromJSON TableDescription where
    parseJSON = withObject "TableDescription" $ \o -> do
        attrDefs <- o .:? "AttributeDefinitions" .!= mempty
        created  <- o .: "CreationDateTime"
        gsis     <- o .:? "GlobalSecondaryIndexes" .!= mempty
        items    <- o .: "ItemCount"
        schema   <- o .: "KeySchema"
        lsis     <- o .:? "LocalSecondaryIndexes" .!= mempty
        thruput  <- o .: "ProvisionedThroughput"
        name     <- o .: "TableName"
        size     <- o .: "TableSizeBytes"
        status   <- o .: "TableStatus"
        pure $ TableDescription attrDefs created gsis items schema lsis
            thruput name size status
instance ToJSON TableDescription where
    toJSON td = object
        [ "AttributeDefinitions"   .= _tdAttributeDefinitions td
        , "TableName"              .= _tdTableName td
        , "KeySchema"              .= _tdKeySchema td
        , "TableStatus"            .= _tdTableStatus td
        , "CreationDateTime"       .= _tdCreationDateTime td
        , "ProvisionedThroughput"  .= _tdProvisionedThroughput td
        , "TableSizeBytes"         .= _tdTableSizeBytes td
        , "ItemCount"              .= _tdItemCount td
        , "LocalSecondaryIndexes"  .= _tdLocalSecondaryIndexes td
        , "GlobalSecondaryIndexes" .= _tdGlobalSecondaryIndexes td
        ]
-- | One or more items to retrieve, identified by primary key, together with
-- options controlling which attributes are returned and how the read is
-- performed. See the corresponding @kaa*@ lenses for field semantics.
data KeysAndAttributes = KeysAndAttributes
    { _kaaAttributesToGet          :: List1 "AttributesToGet" Text -- ^ One or more attribute names to retrieve.
    , _kaaConsistentRead           :: Maybe Bool -- ^ When true, a strongly consistent read is used.
    , _kaaExpressionAttributeNames :: Map Text Text -- ^ Substitution tokens for attribute names in an expression.
    , _kaaKeys                     :: List1 "Keys" (Map Text AttributeValue) -- ^ Primary key values identifying the items.
    , _kaaProjectionExpression     :: Maybe Text -- ^ Expression naming the attributes to retrieve.
    } deriving (Eq, Read, Show)
-- | 'KeysAndAttributes' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'kaaAttributesToGet' @::@ 'NonEmpty' 'Text'
--
-- * 'kaaConsistentRead' @::@ 'Maybe' 'Bool'
--
-- * 'kaaExpressionAttributeNames' @::@ 'HashMap' 'Text' 'Text'
--
-- * 'kaaKeys' @::@ 'NonEmpty' ('HashMap' 'Text' 'AttributeValue')
--
-- * 'kaaProjectionExpression' @::@ 'Maybe' 'Text'
--
keysAndAttributes :: NonEmpty (HashMap Text AttributeValue) -- ^ 'kaaKeys'
                  -> NonEmpty Text -- ^ 'kaaAttributesToGet'
                  -> KeysAndAttributes
keysAndAttributes ks attrs = KeysAndAttributes
    { _kaaAttributesToGet          = withIso _List1 (const id) attrs
    , _kaaConsistentRead           = Nothing
    , _kaaExpressionAttributeNames = mempty
    , _kaaKeys                     = withIso _List1 (const id) ks
    , _kaaProjectionExpression     = Nothing
    }
-- | One or more attributes to retrieve from the table or index. If no attribute
-- names are specified then all attributes will be returned. If any of the
-- specified attributes are not found, they will not appear in the result.
kaaAttributesToGet :: Lens' KeysAndAttributes (NonEmpty Text)
kaaAttributesToGet = lens get set . _List1
  where
    get = _kaaAttributesToGet
    set s v = s { _kaaAttributesToGet = v }

-- | The consistency of a read operation. If set to 'true', then a strongly
-- consistent read is used; otherwise, an eventually consistent read is used.
kaaConsistentRead :: Lens' KeysAndAttributes (Maybe Bool)
kaaConsistentRead = lens get set
  where
    get = _kaaConsistentRead
    set s v = s { _kaaConsistentRead = v }
-- | One or more substitution tokens for attribute names in an expression. The
-- following are some use cases for using /ExpressionAttributeNames/:
--
-- To access an attribute whose name conflicts with a DynamoDB reserved word.
--
-- To create a placeholder for repeating occurrences of an attribute name in
-- an expression.
--
-- To prevent special characters in an attribute name from being
-- misinterpreted in an expression.
--
-- Use the # character in an expression to dereference an attribute name. For
-- example, consider the following attribute name:
--
-- 'Percentile'
--
-- The name of this attribute conflicts with a reserved word, so it cannot be
-- used directly in an expression. (For the complete list of reserved words, go
-- to <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html Reserved Words> in the /Amazon DynamoDB Developer Guide/). To work around
-- this, you could specify the following for /ExpressionAttributeNames/:
--
-- '{"#P":"Percentile"}'
--
-- You could then use this substitution in an expression, as in this example:
--
-- '#P = :val'
--
-- Tokens that begin with the : character are /expression attribute values/,
-- which are placeholders for the actual value at runtime.
--
-- For more information on expression attribute names, go to <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html Accessing ItemAttributes> in the /Amazon DynamoDB Developer Guide/.
kaaExpressionAttributeNames :: Lens' KeysAndAttributes (HashMap Text Text)
kaaExpressionAttributeNames = lens get set . _Map
  where
    get = _kaaExpressionAttributeNames
    set s v = s { _kaaExpressionAttributeNames = v }
-- | The primary key attribute values that define the items and the attributes
-- associated with the items.
kaaKeys :: Lens' KeysAndAttributes (NonEmpty (HashMap Text AttributeValue))
kaaKeys = lens get set . _List1
  where
    get = _kaaKeys
    set s v = s { _kaaKeys = v }

-- | A string that identifies one or more attributes to retrieve from the table.
-- These attributes can include scalars, sets, or elements of a JSON document.
-- The attributes in the /ProjectionExpression/ must be separated by commas.
--
-- If no attribute names are specified, then all attributes will be returned.
-- If any of the requested attributes are not found, they will not appear in the
-- result.
--
-- For more information, go to <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html Accessing Item Attributes> in the /Amazon DynamoDBDeveloper Guide/.
kaaProjectionExpression :: Lens' KeysAndAttributes (Maybe Text)
kaaProjectionExpression = lens get set
  where
    get = _kaaProjectionExpression
    set s v = s { _kaaProjectionExpression = v }
-- Field order below must match the 'KeysAndAttributes' constructor.
instance FromJSON KeysAndAttributes where
    parseJSON = withObject "KeysAndAttributes" $ \o -> do
        attrs   <- o .: "AttributesToGet"
        consist <- o .:? "ConsistentRead"
        names   <- o .:? "ExpressionAttributeNames" .!= mempty
        ks      <- o .: "Keys"
        proj    <- o .:? "ProjectionExpression"
        pure $ KeysAndAttributes attrs consist names ks proj
instance ToJSON KeysAndAttributes where
    toJSON kaa = object
        [ "Keys"                     .= _kaaKeys kaa
        , "AttributesToGet"          .= _kaaAttributesToGet kaa
        , "ConsistentRead"           .= _kaaConsistentRead kaa
        , "ProjectionExpression"     .= _kaaProjectionExpression kaa
        , "ExpressionAttributeNames" .= _kaaExpressionAttributeNames kaa
        ]
-- | Valid values: @INDEXES@, @NONE@, or @TOTAL@.
data ReturnConsumedCapacity
    = Indexes -- ^ INDEXES
    | None    -- ^ NONE
    | Total   -- ^ TOTAL
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable ReturnConsumedCapacity

instance FromText ReturnConsumedCapacity where
    parser = do
        t <- takeLowerText
        case t of
            "indexes" -> pure Indexes
            "none"    -> pure None
            "total"   -> pure Total
            e         -> fail $
                "Failure parsing ReturnConsumedCapacity from " ++ show e

instance ToText ReturnConsumedCapacity where
    toText Indexes = "INDEXES"
    toText None    = "NONE"
    toText Total   = "TOTAL"

instance ToByteString ReturnConsumedCapacity
instance ToHeader ReturnConsumedCapacity
instance ToQuery ReturnConsumedCapacity

instance FromJSON ReturnConsumedCapacity where
    parseJSON = parseJSONText "ReturnConsumedCapacity"

instance ToJSON ReturnConsumedCapacity where
    toJSON = toJSONText
-- | Valid values: @NONE@ or @SIZE@.
data ReturnItemCollectionMetrics
    = RICMNone -- ^ NONE
    | RICMSize -- ^ SIZE
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable ReturnItemCollectionMetrics

instance FromText ReturnItemCollectionMetrics where
    parser = do
        t <- takeLowerText
        case t of
            "none" -> pure RICMNone
            "size" -> pure RICMSize
            e      -> fail $
                "Failure parsing ReturnItemCollectionMetrics from " ++ show e

instance ToText ReturnItemCollectionMetrics where
    toText RICMNone = "NONE"
    toText RICMSize = "SIZE"

instance ToByteString ReturnItemCollectionMetrics
instance ToHeader ReturnItemCollectionMetrics
instance ToQuery ReturnItemCollectionMetrics

instance FromJSON ReturnItemCollectionMetrics where
    parseJSON = parseJSONText "ReturnItemCollectionMetrics"

instance ToJSON ReturnItemCollectionMetrics where
    toJSON = toJSONText
-- | An update action ('PUT', 'DELETE', or 'ADD') together with the value it
-- operates on. See 'avuAction' for the detailed semantics of each action.
data AttributeValueUpdate = AttributeValueUpdate
    { _avuAction :: Maybe AttributeAction -- ^ How to perform the update; 'PUT' is the documented default.
    , _avuValue  :: Maybe AttributeValue -- ^ The value used by the chosen action.
    } deriving (Eq, Read, Show)
-- | 'AttributeValueUpdate' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'avuAction' @::@ 'Maybe' 'AttributeAction'
--
-- * 'avuValue' @::@ 'Maybe' 'AttributeValue'
--
attributeValueUpdate :: AttributeValueUpdate
attributeValueUpdate = AttributeValueUpdate
    { _avuAction = Nothing
    , _avuValue  = Nothing
    }
-- | Specifies how to perform the update. Valid values are 'PUT' (default), 'DELETE',
-- and 'ADD'. The behavior depends on whether the specified primary key already
-- exists in the table.
--
-- If an item with the specified /Key/ is found in the table:
--
-- 'PUT' - Adds the specified attribute to the item. If the attribute already
-- exists, it is replaced by the new value.
--
-- 'DELETE' - If no value is specified, the attribute and its value are removed
-- from the item. The data type of the specified value must match the existing
-- value's data type.
--
-- If a /set/ of values is specified, then those values are subtracted from the
-- old set. For example, if the attribute value was the set '[a,b,c]' and the /DELETE/ action specified '[a,c]', then the final attribute value would be '[b]'.
-- Specifying an empty set is an error.
--
-- 'ADD' - If the attribute does not already exist, then the attribute and its
-- values are added to the item. If the attribute does exist, then the behavior
-- of 'ADD' depends on the data type of the attribute:
--
-- If the existing attribute is a number, and if /Value/ is also a number, then
-- the /Value/ is mathematically added to the existing attribute. If /Value/ is a
-- negative number, then it is subtracted from the existing attribute.
--
-- If you use 'ADD' to increment or decrement a number value for an item that
-- doesn't exist before the update, DynamoDB uses 0 as the initial value.
--
-- In addition, if you use 'ADD' to update an existing item, and intend to
-- increment or decrement an attribute value which does not yet exist, DynamoDB
-- uses '0' as the initial value. For example, suppose that the item you want to
-- update does not yet have an attribute named /itemcount/, but you decide to 'ADD'
-- the number '3' to this attribute anyway, even though it currently does not
-- exist. DynamoDB will create the /itemcount/ attribute, set its initial value to '0', and finally add '3' to it. The result will be a new /itemcount/ attribute in
-- the item, with a value of '3'.
--
-- If the existing data type is a set, and if the /Value/ is also a set, then
-- the /Value/ is added to the existing set. (This is a /set/ operation, not
-- mathematical addition.) For example, if the attribute value was the set '[1,2]', and the
-- 'ADD' action specified '[3]', then the final attribute value would be '[1,2,3]'. An
-- error occurs if an Add action is specified for a set attribute and the
-- attribute type specified does not match the existing set type.
--
-- Both sets must have the same primitive data type. For example, if the
-- existing data type is a set of strings, the /Value/ must also be a set of
-- strings. The same holds true for number sets and binary sets.
--
-- This action is only valid for an existing attribute whose data type is
-- number or is a set. Do not use 'ADD' for any other data types.
--
-- If no item with the specified /Key/ is found:
--
-- 'PUT' - DynamoDB creates a new item with the specified primary key, and then
-- adds the attribute.
--
-- 'DELETE' - Nothing happens; there is no attribute to delete.
--
-- 'ADD' - DynamoDB creates an item with the supplied primary key and number
-- (or set of numbers) for the attribute value. The only data types allowed are
-- number and number set; no other data types can be specified.
--
--
avuAction :: Lens' AttributeValueUpdate (Maybe AttributeAction)
avuAction = lens get set
  where
    get = _avuAction
    set s v = s { _avuAction = v }

-- | The value used by the chosen update action (see 'avuAction').
avuValue :: Lens' AttributeValueUpdate (Maybe AttributeValue)
avuValue = lens get set
  where
    get = _avuValue
    set s v = s { _avuValue = v }
instance FromJSON AttributeValueUpdate where
    parseJSON = withObject "AttributeValueUpdate" $ \o -> do
        action <- o .:? "Action"
        value  <- o .:? "Value"
        pure $ AttributeValueUpdate action value

instance ToJSON AttributeValueUpdate where
    toJSON avu = object
        [ "Value"  .= _avuValue avu
        , "Action" .= _avuAction avu
        ]
-- | A condition that DynamoDB evaluates before attempting a conditional
-- operation; expressed either via the legacy 'eavExists'\/'eavValue' pair or
-- via 'eavComparisonOperator' with 'eavAttributeValueList'.
data ExpectedAttributeValue = ExpectedAttributeValue
    { _eavAttributeValueList :: List "AttributeValueList" AttributeValue -- ^ Operands for the comparison operator.
    , _eavComparisonOperator :: Maybe ComparisonOperator -- ^ Comparator applied to the attribute.
    , _eavExists             :: Maybe Bool -- ^ Whether the attribute is expected to exist.
    , _eavValue              :: Maybe AttributeValue -- ^ Value used by the conditional check.
    } deriving (Eq, Read, Show)
-- | 'ExpectedAttributeValue' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'eavAttributeValueList' @::@ ['AttributeValue']
--
-- * 'eavComparisonOperator' @::@ 'Maybe' 'ComparisonOperator'
--
-- * 'eavExists' @::@ 'Maybe' 'Bool'
--
-- * 'eavValue' @::@ 'Maybe' 'AttributeValue'
--
expectedAttributeValue :: ExpectedAttributeValue
expectedAttributeValue = ExpectedAttributeValue
    { _eavAttributeValueList = mempty
    , _eavComparisonOperator = Nothing
    , _eavExists             = Nothing
    , _eavValue              = Nothing
    }
-- | One or more values to evaluate against the supplied attribute. The number of
-- values in the list depends on the /ComparisonOperator/ being used.
--
-- For type Number, value comparisons are numeric.
--
-- String value comparisons for greater than, equals, or less than are based on
-- ASCII character code values. For example, 'a' is greater than 'A', and 'a' is
-- greater than 'B'. For a list of code values, see <http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters>.
--
-- For Binary, DynamoDB treats each byte of the binary data as unsigned when it
-- compares binary values.
--
-- For information on specifying data types in JSON, see <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html JSON Data Format> in
-- the /Amazon DynamoDB Developer Guide/.
eavAttributeValueList :: Lens' ExpectedAttributeValue [AttributeValue]
eavAttributeValueList = lens get set . _List
  where
    get = _eavAttributeValueList
    set s v = s { _eavAttributeValueList = v }
-- | A comparator for evaluating attributes in the /AttributeValueList/. For
-- example, equals, greater than, less than, etc.
--
-- The following comparison operators are available:
--
-- 'EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |BEGINS_WITH | IN | BETWEEN'
--
-- The following are descriptions of each comparison operator.
--
-- 'EQ' : Equal. 'EQ' is supported for all datatypes, including lists and maps.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, Binary, String Set, Number Set, or Binary Set. If an item
-- contains an /AttributeValue/ element of a different type than the one provided
-- in the request, the value does not match. For example, '{"S":"6"}' does not
-- equal '{"N":"6"}'. Also, '{"N":"6"}' does not equal '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'NE' : Not equal. 'NE' is supported for all datatypes, including lists and
-- maps.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ of type String,
-- Number, Binary, String Set, Number Set, or Binary Set. If an item contains an /AttributeValue/ of a different type than the one provided in the request, the
-- value does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not equal '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'LE' : Less than or equal.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'LT' : Less than.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ of type String,
-- Number, or Binary (not a set type). If an item contains an /AttributeValue/
-- element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'GE' : Greater than or equal.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'GT' : Greater than.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'NOT_NULL' : The attribute exists. 'NOT_NULL' is supported for all datatypes,
-- including lists and maps.
--
-- This operator tests for the existence of an attribute, not its data type. If
-- the data type of attribute "'a'" is null, and you evaluate it using 'NOT_NULL',
-- the result is a Boolean /true/. This result is because the attribute "'a'"
-- exists; its data type is not relevant to the 'NOT_NULL' comparison operator.
--
-- 'NULL' : The attribute does not exist. 'NULL' is supported for all datatypes,
-- including lists and maps.
--
-- This operator tests for the nonexistence of an attribute, not its data type.
-- If the data type of attribute "'a'" is null, and you evaluate it using 'NULL',
-- the result is a Boolean /false/. This is because the attribute "'a'" exists; its
-- data type is not relevant to the 'NULL' comparison operator.
--
-- 'CONTAINS' : Checks for a subsequence, or value in a set.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If the target attribute of the
-- comparison is of type String, then the operator checks for a substring match.
-- If the target attribute of the comparison is of type Binary, then the
-- operator looks for a subsequence of the target that matches the input. If the
-- target attribute of the comparison is a set ("'SS'", "'NS'", or "'BS'"), then the
-- operator evaluates to true if it finds an exact match with any member of the
-- set.
--
-- CONTAINS is supported for lists: When evaluating "'a CONTAINS b'", "'a'" can be
-- a list; however, "'b'" cannot be a set, a map, or a list.
--
-- 'NOT_CONTAINS' : Checks for absence of a subsequence, or absence of a value
-- in a set.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If the target attribute of the
-- comparison is a String, then the operator checks for the absence of a
-- substring match. If the target attribute of the comparison is Binary, then
-- the operator checks for the absence of a subsequence of the target that
-- matches the input. If the target attribute of the comparison is a set ("'SS'", "'NS'", or "'BS'"), then the operator evaluates to true if it /does not/ find an
-- exact match with any member of the set.
--
-- NOT_CONTAINS is supported for lists: When evaluating "'a NOT CONTAINS b'", "'a'"
-- can be a list; however, "'b'" cannot be a set, a map, or a list.
--
-- 'BEGINS_WITH' : Checks for a prefix.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ of type String or
-- Binary (not a Number or a set type). The target attribute of the comparison
-- must be of type String or Binary (not a Number or a set type).
--
--
--
-- 'IN' : Checks for matching elements within two sets.
--
-- /AttributeValueList/ can contain one or more /AttributeValue/ elements of type
-- String, Number, or Binary (not a set type). These attributes are compared
-- against an existing set type attribute of an item. If any elements of the
-- input set are present in the item attribute, the expression evaluates to true.
--
-- 'BETWEEN' : Greater than or equal to the first value, and less than or equal
-- to the second value.
--
-- /AttributeValueList/ must contain two /AttributeValue/ elements of the same
-- type, either String, Number, or Binary (not a set type). A target attribute
-- matches if the target value is greater than, or equal to, the first element
-- and less than, or equal to, the second element. If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not compare to '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'
--
--
eavComparisonOperator :: Lens' ExpectedAttributeValue (Maybe ComparisonOperator)
eavComparisonOperator = lens get set
  where
    get = _eavComparisonOperator
    set s v = s { _eavComparisonOperator = v }
-- | Causes DynamoDB to evaluate the value before attempting a conditional
-- operation:
--
-- If /Exists/ is 'true', DynamoDB will check to see if that attribute value
-- already exists in the table. If it is found, then the operation succeeds. If
-- it is not found, the operation fails with a /ConditionalCheckFailedException/.
--
-- If /Exists/ is 'false', DynamoDB assumes that the attribute value does not
-- exist in the table. If in fact the value does not exist, then the assumption
-- is valid and the operation succeeds. If the value is found, despite the
-- assumption that it does not exist, the operation fails with a /ConditionalCheckFailedException/.
--
-- The default setting for /Exists/ is 'true'. If you supply a /Value/ all by
-- itself, DynamoDB assumes the attribute exists: You don't have to set /Exists/
-- to 'true', because it is implied.
--
-- DynamoDB returns a /ValidationException/ if:
--
-- /Exists/ is 'true' but there is no /Value/ to check. (You expect a value to
-- exist, but don't specify what that value is.)
--
-- /Exists/ is 'false' but you also provide a /Value/. (You cannot expect an
-- attribute to have a value, while also expecting it not to exist.)
--
--
eavExists :: Lens' ExpectedAttributeValue (Maybe Bool)
eavExists = lens get set
  where
    get = _eavExists
    set s v = s { _eavExists = v }

-- | The attribute value used by the conditional check (see 'eavExists' and
-- 'eavComparisonOperator').
eavValue :: Lens' ExpectedAttributeValue (Maybe AttributeValue)
eavValue = lens get set
  where
    get = _eavValue
    set s v = s { _eavValue = v }
instance FromJSON ExpectedAttributeValue where
    parseJSON = withObject "ExpectedAttributeValue" $ \o -> do
        avl    <- o .:? "AttributeValueList" .!= mempty
        cmp    <- o .:? "ComparisonOperator"
        exists <- o .:? "Exists"
        value  <- o .:? "Value"
        pure $ ExpectedAttributeValue avl cmp exists value

instance ToJSON ExpectedAttributeValue where
    toJSON eav = object
        [ "Value"              .= _eavValue eav
        , "Exists"             .= _eavExists eav
        , "ComparisonOperator" .= _eavComparisonOperator eav
        , "AttributeValueList" .= _eavAttributeValueList eav
        ]
-- | A named attribute together with its scalar data type.
data AttributeDefinition = AttributeDefinition
    { _adAttributeName :: Text -- ^ A name for the attribute.
    , _adAttributeType :: ScalarAttributeType -- ^ The data type for the attribute.
    } deriving (Eq, Read, Show)
-- | 'AttributeDefinition' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'adAttributeName' @::@ 'Text'
--
-- * 'adAttributeType' @::@ 'ScalarAttributeType'
--
attributeDefinition :: Text -- ^ 'adAttributeName'
                    -> ScalarAttributeType -- ^ 'adAttributeType'
                    -> AttributeDefinition
attributeDefinition name typ = AttributeDefinition
    { _adAttributeName = name
    , _adAttributeType = typ
    }

-- | A name for the attribute.
adAttributeName :: Lens' AttributeDefinition Text
adAttributeName = lens get set
  where
    get = _adAttributeName
    set s v = s { _adAttributeName = v }

-- | The data type for the attribute.
adAttributeType :: Lens' AttributeDefinition ScalarAttributeType
adAttributeType = lens get set
  where
    get = _adAttributeType
    set s v = s { _adAttributeType = v }

instance FromJSON AttributeDefinition where
    parseJSON = withObject "AttributeDefinition" $ \o -> do
        name <- o .: "AttributeName"
        typ  <- o .: "AttributeType"
        pure $ AttributeDefinition name typ

instance ToJSON AttributeDefinition where
    toJSON ad = object
        [ "AttributeName" .= _adAttributeName ad
        , "AttributeType" .= _adAttributeType ad
        ]
-- | A comparator used in conditional operations; see 'eavComparisonOperator'
-- for the semantics of each operator.
data ComparisonOperator
    = BeginsWith  -- ^ BEGINS_WITH
    | Between     -- ^ BETWEEN
    | Contains    -- ^ CONTAINS
    | Eq          -- ^ EQ
    | Ge          -- ^ GE
    | Gt          -- ^ GT
    | In'         -- ^ IN
    | Le          -- ^ LE
    | Lt          -- ^ LT
    | Ne          -- ^ NE
    | NotContains -- ^ NOT_CONTAINS
    | NotNull     -- ^ NOT_NULL
    | Null        -- ^ NULL
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable ComparisonOperator

instance FromText ComparisonOperator where
    parser = do
        t <- takeLowerText
        case t of
            "begins_with"  -> pure BeginsWith
            "between"      -> pure Between
            "contains"     -> pure Contains
            "eq"           -> pure Eq
            "ge"           -> pure Ge
            "gt"           -> pure Gt
            "in"           -> pure In'
            "le"           -> pure Le
            "lt"           -> pure Lt
            "ne"           -> pure Ne
            "not_contains" -> pure NotContains
            "not_null"     -> pure NotNull
            "null"         -> pure Null
            e              -> fail $
                "Failure parsing ComparisonOperator from " ++ show e

instance ToText ComparisonOperator where
    toText BeginsWith  = "BEGINS_WITH"
    toText Between     = "BETWEEN"
    toText Contains    = "CONTAINS"
    toText Eq          = "EQ"
    toText Ge          = "GE"
    toText Gt          = "GT"
    toText In'         = "IN"
    toText Le          = "LE"
    toText Lt          = "LT"
    toText Ne          = "NE"
    toText NotContains = "NOT_CONTAINS"
    toText NotNull     = "NOT_NULL"
    toText Null        = "NULL"

instance ToByteString ComparisonOperator
instance ToHeader ComparisonOperator
instance ToQuery ComparisonOperator

instance FromJSON ComparisonOperator where
    parseJSON = parseJSONText "ComparisonOperator"

instance ToJSON ComparisonOperator where
    toJSON = toJSONText
-- | Valid values: @ALL_NEW@, @ALL_OLD@, @NONE@, @UPDATED_NEW@, @UPDATED_OLD@.
data ReturnValue
    = RVAllNew      -- ^ ALL_NEW
    | RVAllOld      -- ^ ALL_OLD
    | RVNone        -- ^ NONE
    | RVUpdatedNew  -- ^ UPDATED_NEW
    | RVUpdatedOld  -- ^ UPDATED_OLD
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable ReturnValue

instance FromText ReturnValue where
    parser = do
        t <- takeLowerText
        case t of
            "all_new"     -> pure RVAllNew
            "all_old"     -> pure RVAllOld
            "none"        -> pure RVNone
            "updated_new" -> pure RVUpdatedNew
            "updated_old" -> pure RVUpdatedOld
            e             -> fail $
                "Failure parsing ReturnValue from " ++ show e

instance ToText ReturnValue where
    toText RVAllNew     = "ALL_NEW"
    toText RVAllOld     = "ALL_OLD"
    toText RVNone       = "NONE"
    toText RVUpdatedNew = "UPDATED_NEW"
    toText RVUpdatedOld = "UPDATED_OLD"

instance ToByteString ReturnValue
instance ToHeader ReturnValue
instance ToQuery ReturnValue

instance FromJSON ReturnValue where
    parseJSON = parseJSONText "ReturnValue"

instance ToJSON ReturnValue where
    toJSON = toJSONText
-- | A local secondary index: its name, key schema, and the attributes
-- projected into it.
data LocalSecondaryIndex = LocalSecondaryIndex
    { _lsiIndexName  :: Text -- ^ Index name, unique among this table's indexes.
    , _lsiKeySchema  :: List1 "KeySchema" KeySchemaElement -- ^ One or more HASH/RANGE key elements.
    , _lsiProjection :: Projection -- ^ Attributes projected into the index.
    } deriving (Eq, Read, Show)
-- | 'LocalSecondaryIndex' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'lsiIndexName' @::@ 'Text'
--
-- * 'lsiKeySchema' @::@ 'NonEmpty' 'KeySchemaElement'
--
-- * 'lsiProjection' @::@ 'Projection'
--
localSecondaryIndex :: Text -- ^ 'lsiIndexName'
                    -> NonEmpty KeySchemaElement -- ^ 'lsiKeySchema'
                    -> Projection -- ^ 'lsiProjection'
                    -> LocalSecondaryIndex
localSecondaryIndex name schema proj = LocalSecondaryIndex
    { _lsiIndexName  = name
    , _lsiKeySchema  = withIso _List1 (const id) schema
    , _lsiProjection = proj
    }

-- | The name of the local secondary index. The name must be unique among all
-- other indexes on this table.
lsiIndexName :: Lens' LocalSecondaryIndex Text
lsiIndexName = lens get set
  where
    get = _lsiIndexName
    set s v = s { _lsiIndexName = v }

-- | The complete key schema for the local secondary index, consisting of one or
-- more pairs of attribute names and key types ('HASH' or 'RANGE').
lsiKeySchema :: Lens' LocalSecondaryIndex (NonEmpty KeySchemaElement)
lsiKeySchema = lens get set . _List1
  where
    get = _lsiKeySchema
    set s v = s { _lsiKeySchema = v }

-- | The attributes projected into the index.
lsiProjection :: Lens' LocalSecondaryIndex Projection
lsiProjection = lens get set
  where
    get = _lsiProjection
    set s v = s { _lsiProjection = v }

instance FromJSON LocalSecondaryIndex where
    parseJSON = withObject "LocalSecondaryIndex" $ \o -> do
        name   <- o .: "IndexName"
        schema <- o .: "KeySchema"
        proj   <- o .: "Projection"
        pure $ LocalSecondaryIndex name schema proj

instance ToJSON LocalSecondaryIndex where
    toJSON lsi = object
        [ "IndexName"  .= _lsiIndexName lsi
        , "KeySchema"  .= _lsiKeySchema lsi
        , "Projection" .= _lsiProjection lsi
        ]
-- | Describes the state of a global secondary index on a table.
data GlobalSecondaryIndexDescription = GlobalSecondaryIndexDescription
    { _gsidBackfilling           :: Maybe Bool -- ^ Whether the index is currently backfilling.
    , _gsidIndexName             :: Maybe Text -- ^ The name of the global secondary index.
    , _gsidIndexSizeBytes        :: Maybe Integer -- ^ Total index size in bytes (refreshed approximately every six hours).
    , _gsidIndexStatus           :: Maybe IndexStatus -- ^ CREATING, UPDATING, DELETING, or ACTIVE.
    , _gsidItemCount             :: Maybe Integer -- ^ Item count (refreshed approximately every six hours).
    , _gsidKeySchema             :: List1 "KeySchema" KeySchemaElement -- ^ One or more HASH/RANGE key elements.
    , _gsidProjection            :: Maybe Projection -- ^ Attributes projected into the index.
    , _gsidProvisionedThroughput :: Maybe ProvisionedThroughputDescription -- ^ Read/write capacity settings.
    } deriving (Eq, Read, Show)
-- | 'GlobalSecondaryIndexDescription' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'gsidBackfilling' @::@ 'Maybe' 'Bool'
--
-- * 'gsidIndexName' @::@ 'Maybe' 'Text'
--
-- * 'gsidIndexSizeBytes' @::@ 'Maybe' 'Integer'
--
-- * 'gsidIndexStatus' @::@ 'Maybe' 'IndexStatus'
--
-- * 'gsidItemCount' @::@ 'Maybe' 'Integer'
--
-- * 'gsidKeySchema' @::@ 'NonEmpty' 'KeySchemaElement'
--
-- * 'gsidProjection' @::@ 'Maybe' 'Projection'
--
-- * 'gsidProvisionedThroughput' @::@ 'Maybe' 'ProvisionedThroughputDescription'
--
globalSecondaryIndexDescription :: NonEmpty KeySchemaElement -- ^ 'gsidKeySchema'
                                -> GlobalSecondaryIndexDescription
globalSecondaryIndexDescription schema = GlobalSecondaryIndexDescription
    { _gsidBackfilling           = Nothing
    , _gsidIndexName             = Nothing
    , _gsidIndexSizeBytes        = Nothing
    , _gsidIndexStatus           = Nothing
    , _gsidItemCount             = Nothing
    , _gsidKeySchema             = withIso _List1 (const id) schema
    , _gsidProjection            = Nothing
    , _gsidProvisionedThroughput = Nothing
    }
-- | Indicates whether the index is currently backfilling. /Backfilling/ is the
-- process of reading items from the table and determining whether they can be
-- added to the index. (Not all items will qualify: For example, a hash key
-- attribute cannot have any duplicates.) If an item can be added to the index,
-- DynamoDB will do so. After all items have been processed, the backfilling
-- operation is complete and /Backfilling/ is false.
--
-- For indexes that were created during a /CreateTable/ operation, the /Backfilling/
-- attribute does not appear in the /DescribeTable/ output.
--
gsidBackfilling :: Lens' GlobalSecondaryIndexDescription (Maybe Bool)
gsidBackfilling = lens get set
  where
    get = _gsidBackfilling
    set s v = s { _gsidBackfilling = v }

-- | The name of the global secondary index.
gsidIndexName :: Lens' GlobalSecondaryIndexDescription (Maybe Text)
gsidIndexName = lens get set
  where
    get = _gsidIndexName
    set s v = s { _gsidIndexName = v }

-- | The total size of the specified index, in bytes. DynamoDB updates this
-- value approximately every six hours. Recent changes might not be reflected
-- in this value.
gsidIndexSizeBytes :: Lens' GlobalSecondaryIndexDescription (Maybe Integer)
gsidIndexSizeBytes = lens get set
  where
    get = _gsidIndexSizeBytes
    set s v = s { _gsidIndexSizeBytes = v }
-- | The current state of the global secondary index:
--
-- /CREATING/ - The index is being created.
--
-- /UPDATING/ - The index is being updated.
--
-- /DELETING/ - The index is being deleted.
--
-- /ACTIVE/ - The index is ready for use.
--
--
gsidIndexStatus :: Lens' GlobalSecondaryIndexDescription (Maybe IndexStatus)
gsidIndexStatus = lens get set
  where
    get = _gsidIndexStatus
    set s v = s { _gsidIndexStatus = v }

-- | The number of items in the specified index. DynamoDB updates this value
-- approximately every six hours. Recent changes might not be reflected in this
-- value.
gsidItemCount :: Lens' GlobalSecondaryIndexDescription (Maybe Integer)
gsidItemCount = lens get set
  where
    get = _gsidItemCount
    set s v = s { _gsidItemCount = v }

-- | The complete key schema for the global secondary index, consisting of one
-- or more pairs of attribute names and key types ('HASH' or 'RANGE').
gsidKeySchema :: Lens' GlobalSecondaryIndexDescription (NonEmpty KeySchemaElement)
gsidKeySchema = lens get set . _List1
  where
    get = _gsidKeySchema
    set s v = s { _gsidKeySchema = v }

-- | The attributes projected into the index, if reported.
gsidProjection :: Lens' GlobalSecondaryIndexDescription (Maybe Projection)
gsidProjection = lens get set
  where
    get = _gsidProjection
    set s v = s { _gsidProjection = v }

-- | The provisioned throughput settings for the index, if reported.
gsidProvisionedThroughput :: Lens' GlobalSecondaryIndexDescription (Maybe ProvisionedThroughputDescription)
gsidProvisionedThroughput = lens get set
  where
    get = _gsidProvisionedThroughput
    set s v = s { _gsidProvisionedThroughput = v }
-- Decodes the service's wire representation.  Per the parsers below,
-- "KeySchema" is the only required key ('.:'); every other attribute is
-- optional ('.:?').
instance FromJSON GlobalSecondaryIndexDescription where
    parseJSON = withObject "GlobalSecondaryIndexDescription" $ \o -> GlobalSecondaryIndexDescription
        <$> o .:? "Backfilling"
        <*> o .:? "IndexName"
        <*> o .:? "IndexSizeBytes"
        <*> o .:? "IndexStatus"
        <*> o .:? "ItemCount"
        <*> o .: "KeySchema"
        <*> o .:? "Projection"
        <*> o .:? "ProvisionedThroughput"

-- Serializes every field; the key order here mirrors the other generated
-- instances in this module and carries no semantic weight for JSON objects.
instance ToJSON GlobalSecondaryIndexDescription where
    toJSON GlobalSecondaryIndexDescription{..} = object
        [ "IndexName" .= _gsidIndexName
        , "KeySchema" .= _gsidKeySchema
        , "Projection" .= _gsidProjection
        , "IndexStatus" .= _gsidIndexStatus
        , "Backfilling" .= _gsidBackfilling
        , "ProvisionedThroughput" .= _gsidProvisionedThroughput
        , "IndexSizeBytes" .= _gsidIndexSizeBytes
        , "ItemCount" .= _gsidItemCount
        ]
-- | Size statistics reported for an item collection.
data ItemCollectionMetrics = ItemCollectionMetrics
    { _icmItemCollectionKey   :: Map Text AttributeValue
    , _icmSizeEstimateRangeGB :: List "SizeEstimateRangeGB" Double
    } deriving (Eq, Read, Show)

-- | Build an 'ItemCollectionMetrics' with both fields empty.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'icmItemCollectionKey' @::@ 'HashMap' 'Text' 'AttributeValue'
--
-- * 'icmSizeEstimateRangeGB' @::@ ['Double']
itemCollectionMetrics :: ItemCollectionMetrics
itemCollectionMetrics = ItemCollectionMetrics mempty mempty

-- | The hash key value of the item collection.  This value is the same as the
-- hash key of the item.
icmItemCollectionKey :: Lens' ItemCollectionMetrics (HashMap Text AttributeValue)
icmItemCollectionKey = field . _Map
  where
    field = lens _icmItemCollectionKey
                 (\s m -> s { _icmItemCollectionKey = m })

-- | An estimate of item collection size, in gigabytes: a two-element array
-- holding a lower and an upper bound.  The estimate covers all items in the
-- table plus all attributes projected into the local secondary indexes, and is
-- meant for judging whether a local secondary index is approaching its size
-- limit.  It changes over time, so do not rely on its precision or accuracy.
icmSizeEstimateRangeGB :: Lens' ItemCollectionMetrics [Double]
icmSizeEstimateRangeGB = field . _List
  where
    field = lens _icmSizeEstimateRangeGB
                 (\s r -> s { _icmSizeEstimateRangeGB = r })

instance FromJSON ItemCollectionMetrics where
    parseJSON = withObject "ItemCollectionMetrics" $ \o -> do
        key   <- o .:? "ItemCollectionKey"   .!= mempty
        sizes <- o .:? "SizeEstimateRangeGB" .!= mempty
        return (ItemCollectionMetrics key sizes)

instance ToJSON ItemCollectionMetrics where
    toJSON m = object
        [ "ItemCollectionKey"   .= _icmItemCollectionKey m
        , "SizeEstimateRangeGB" .= _icmSizeEstimateRangeGB m
        ]
-- | A consumed-capacity figure for a table or an index.
newtype Capacity = Capacity
    { _cCapacityUnits :: Maybe Double
    } deriving (Eq, Ord, Read, Show)

-- | Build a 'Capacity' with no units recorded.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'cCapacityUnits' @::@ 'Maybe' 'Double'
capacity :: Capacity
capacity = Capacity Nothing

-- | The total number of capacity units consumed on a table or an index.
cCapacityUnits :: Lens' Capacity (Maybe Double)
cCapacityUnits = lens get set
  where
    get         = _cCapacityUnits
    set s units = s { _cCapacityUnits = units }

instance FromJSON Capacity where
    parseJSON = withObject "Capacity" $ \o ->
        fmap Capacity (o .:? "CapacityUnits")

instance ToJSON Capacity where
    toJSON c = object
        [ "CapacityUnits" .= _cCapacityUnits c
        ]
-- | Capacity consumed by an operation, broken down per table and per index.
data ConsumedCapacity = ConsumedCapacity
    { _ccCapacityUnits          :: Maybe Double
    , _ccGlobalSecondaryIndexes :: Map Text Capacity
    , _ccLocalSecondaryIndexes  :: Map Text Capacity
    , _ccTable                  :: Maybe Capacity
    , _ccTableName              :: Maybe Text
    } deriving (Eq, Read, Show)

-- | Build a 'ConsumedCapacity' with every field unset or empty.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'ccCapacityUnits' @::@ 'Maybe' 'Double'
--
-- * 'ccGlobalSecondaryIndexes' @::@ 'HashMap' 'Text' 'Capacity'
--
-- * 'ccLocalSecondaryIndexes' @::@ 'HashMap' 'Text' 'Capacity'
--
-- * 'ccTable' @::@ 'Maybe' 'Capacity'
--
-- * 'ccTableName' @::@ 'Maybe' 'Text'
consumedCapacity :: ConsumedCapacity
consumedCapacity = ConsumedCapacity
    { _ccCapacityUnits          = Nothing
    , _ccGlobalSecondaryIndexes = mempty
    , _ccLocalSecondaryIndexes  = mempty
    , _ccTable                  = Nothing
    , _ccTableName              = Nothing
    }

-- | The total number of capacity units consumed by the operation.
ccCapacityUnits :: Lens' ConsumedCapacity (Maybe Double)
ccCapacityUnits = lens _ccCapacityUnits (\s u -> s { _ccCapacityUnits = u })

-- | Throughput consumed on each global secondary index affected by the
-- operation.
ccGlobalSecondaryIndexes :: Lens' ConsumedCapacity (HashMap Text Capacity)
ccGlobalSecondaryIndexes = field . _Map
  where
    field = lens _ccGlobalSecondaryIndexes
                 (\s m -> s { _ccGlobalSecondaryIndexes = m })

-- | Throughput consumed on each local secondary index affected by the
-- operation.
ccLocalSecondaryIndexes :: Lens' ConsumedCapacity (HashMap Text Capacity)
ccLocalSecondaryIndexes = field . _Map
  where
    field = lens _ccLocalSecondaryIndexes
                 (\s m -> s { _ccLocalSecondaryIndexes = m })

-- | Throughput consumed on the table affected by the operation.
ccTable :: Lens' ConsumedCapacity (Maybe Capacity)
ccTable = lens _ccTable (\s t -> s { _ccTable = t })

-- | The name of the table that was affected by the operation.
ccTableName :: Lens' ConsumedCapacity (Maybe Text)
ccTableName = lens _ccTableName (\s n -> s { _ccTableName = n })

instance FromJSON ConsumedCapacity where
    parseJSON = withObject "ConsumedCapacity" $ \o -> do
        units <- o .:? "CapacityUnits"
        gsis  <- o .:? "GlobalSecondaryIndexes" .!= mempty
        lsis  <- o .:? "LocalSecondaryIndexes"  .!= mempty
        table <- o .:? "Table"
        name  <- o .:? "TableName"
        return (ConsumedCapacity units gsis lsis table name)

instance ToJSON ConsumedCapacity where
    toJSON cc = object
        [ "TableName"              .= _ccTableName cc
        , "CapacityUnits"          .= _ccCapacityUnits cc
        , "Table"                  .= _ccTable cc
        , "LocalSecondaryIndexes"  .= _ccLocalSecondaryIndexes cc
        , "GlobalSecondaryIndexes" .= _ccGlobalSecondaryIndexes cc
        ]
-- | Full specification of a global secondary index.
data GlobalSecondaryIndex = GlobalSecondaryIndex
    { _gsiIndexName             :: Text
    , _gsiKeySchema             :: List1 "KeySchema" KeySchemaElement
    , _gsiProjection            :: Projection
    , _gsiProvisionedThroughput :: ProvisionedThroughput
    } deriving (Eq, Read, Show)

-- | 'GlobalSecondaryIndex' smart constructor.
globalSecondaryIndex :: Text                      -- ^ 'gsiIndexName'
                     -> NonEmpty KeySchemaElement -- ^ 'gsiKeySchema'
                     -> Projection                -- ^ 'gsiProjection'
                     -> ProvisionedThroughput     -- ^ 'gsiProvisionedThroughput'
                     -> GlobalSecondaryIndex
globalSecondaryIndex name schema proj tput = GlobalSecondaryIndex
    { _gsiIndexName             = name
    , _gsiKeySchema             = withIso _List1 (const id) schema
    , _gsiProjection            = proj
    , _gsiProvisionedThroughput = tput
    }

-- | The name of the global secondary index.  The name must be unique among
-- all other indexes on this table.
gsiIndexName :: Lens' GlobalSecondaryIndex Text
gsiIndexName = lens _gsiIndexName (\s n -> s { _gsiIndexName = n })

-- | The complete key schema for the global secondary index: one or more pairs
-- of attribute names and key types ('HASH' or 'RANGE').
gsiKeySchema :: Lens' GlobalSecondaryIndex (NonEmpty KeySchemaElement)
gsiKeySchema = field . _List1
  where
    field = lens _gsiKeySchema (\s k -> s { _gsiKeySchema = k })

-- | The attribute projection for the index.
gsiProjection :: Lens' GlobalSecondaryIndex Projection
gsiProjection = lens _gsiProjection (\s p -> s { _gsiProjection = p })

-- | The provisioned throughput settings for the index.
gsiProvisionedThroughput :: Lens' GlobalSecondaryIndex ProvisionedThroughput
gsiProvisionedThroughput =
    lens _gsiProvisionedThroughput
         (\s t -> s { _gsiProvisionedThroughput = t })

instance FromJSON GlobalSecondaryIndex where
    parseJSON = withObject "GlobalSecondaryIndex" $ \o -> do
        name   <- o .: "IndexName"
        schema <- o .: "KeySchema"
        proj   <- o .: "Projection"
        tput   <- o .: "ProvisionedThroughput"
        return (GlobalSecondaryIndex name schema proj tput)

instance ToJSON GlobalSecondaryIndex where
    toJSON gsi = object
        [ "IndexName"             .= _gsiIndexName gsi
        , "KeySchema"             .= _gsiKeySchema gsi
        , "Projection"            .= _gsiProjection gsi
        , "ProvisionedThroughput" .= _gsiProvisionedThroughput gsi
        ]
-- | Details of a local secondary index as reported by DynamoDB.
data LocalSecondaryIndexDescription = LocalSecondaryIndexDescription
    { _lsidIndexName      :: Maybe Text
    , _lsidIndexSizeBytes :: Maybe Integer
    , _lsidItemCount      :: Maybe Integer
    , _lsidKeySchema      :: List1 "KeySchema" KeySchemaElement
    , _lsidProjection     :: Maybe Projection
    } deriving (Eq, Read, Show)

-- | 'LocalSecondaryIndexDescription' smart constructor.  Only the key schema
-- is required; every other field starts out as 'Nothing'.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'lsidIndexName' @::@ 'Maybe' 'Text'
--
-- * 'lsidIndexSizeBytes' @::@ 'Maybe' 'Integer'
--
-- * 'lsidItemCount' @::@ 'Maybe' 'Integer'
--
-- * 'lsidKeySchema' @::@ 'NonEmpty' 'KeySchemaElement'
--
-- * 'lsidProjection' @::@ 'Maybe' 'Projection'
localSecondaryIndexDescription :: NonEmpty KeySchemaElement -- ^ 'lsidKeySchema'
                               -> LocalSecondaryIndexDescription
localSecondaryIndexDescription schema = LocalSecondaryIndexDescription
    { _lsidIndexName      = Nothing
    , _lsidIndexSizeBytes = Nothing
    , _lsidItemCount      = Nothing
    , _lsidKeySchema      = withIso _List1 (const id) schema
    , _lsidProjection     = Nothing
    }

-- | Represents the name of the local secondary index.
lsidIndexName :: Lens' LocalSecondaryIndexDescription (Maybe Text)
lsidIndexName = lens _lsidIndexName (\s n -> s { _lsidIndexName = n })

-- | The total size of the specified index, in bytes.  DynamoDB refreshes this
-- value approximately every six hours, so recent changes may not be reflected.
lsidIndexSizeBytes :: Lens' LocalSecondaryIndexDescription (Maybe Integer)
lsidIndexSizeBytes =
    lens _lsidIndexSizeBytes (\s b -> s { _lsidIndexSizeBytes = b })

-- | The number of items in the specified index.  DynamoDB refreshes this
-- value approximately every six hours, so recent changes may not be reflected.
lsidItemCount :: Lens' LocalSecondaryIndexDescription (Maybe Integer)
lsidItemCount = lens _lsidItemCount (\s c -> s { _lsidItemCount = c })

-- | The complete index key schema: one or more pairs of attribute names and
-- key types ('HASH' or 'RANGE').
lsidKeySchema :: Lens' LocalSecondaryIndexDescription (NonEmpty KeySchemaElement)
lsidKeySchema = field . _List1
  where
    field = lens _lsidKeySchema (\s k -> s { _lsidKeySchema = k })

-- | The attribute projection for the index.
lsidProjection :: Lens' LocalSecondaryIndexDescription (Maybe Projection)
lsidProjection = lens _lsidProjection (\s p -> s { _lsidProjection = p })

instance FromJSON LocalSecondaryIndexDescription where
    parseJSON = withObject "LocalSecondaryIndexDescription" $ \o -> do
        name   <- o .:? "IndexName"
        size   <- o .:? "IndexSizeBytes"
        count  <- o .:? "ItemCount"
        schema <- o .:  "KeySchema"
        proj   <- o .:? "Projection"
        return (LocalSecondaryIndexDescription name size count schema proj)

instance ToJSON LocalSecondaryIndexDescription where
    toJSON d = object
        [ "IndexName"      .= _lsidIndexName d
        , "KeySchema"      .= _lsidKeySchema d
        , "Projection"     .= _lsidProjection d
        , "IndexSizeBytes" .= _lsidIndexSizeBytes d
        , "ItemCount"      .= _lsidItemCount d
        ]
-- | The action to apply to an attribute value in an update.
data AttributeAction
    = Add     -- ^ ADD
    | Delete' -- ^ DELETE
    | Put     -- ^ PUT
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable AttributeAction

instance FromText AttributeAction where
    parser = do
        t <- takeLowerText
        case t of
            "add"    -> pure Add
            "delete" -> pure Delete'
            "put"    -> pure Put
            e        -> fail $
                "Failure parsing AttributeAction from " ++ show e

instance ToText AttributeAction where
    toText Add     = "ADD"
    toText Delete' = "DELETE"
    toText Put     = "PUT"

instance ToByteString AttributeAction
instance ToHeader     AttributeAction
instance ToQuery      AttributeAction

instance FromJSON AttributeAction where
    parseJSON = parseJSONText "AttributeAction"

instance ToJSON AttributeAction where
    toJSON = toJSONText
-- | The scalar data type of an attribute.
data ScalarAttributeType
    = B -- ^ B
    | N -- ^ N
    | S -- ^ S
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable ScalarAttributeType

instance FromText ScalarAttributeType where
    parser = do
        t <- takeLowerText
        case t of
            "b" -> pure B
            "n" -> pure N
            "s" -> pure S
            e   -> fail $
                "Failure parsing ScalarAttributeType from " ++ show e

instance ToText ScalarAttributeType where
    toText B = "B"
    toText N = "N"
    toText S = "S"

instance ToByteString ScalarAttributeType
instance ToHeader     ScalarAttributeType
instance ToQuery      ScalarAttributeType

instance FromJSON ScalarAttributeType where
    parseJSON = parseJSONText "ScalarAttributeType"

instance ToJSON ScalarAttributeType where
    toJSON = toJSONText
-- | Attribute projection settings for a secondary index.
data Projection = Projection
    { _pNonKeyAttributes :: List1 "NonKeyAttributes" Text
    , _pProjectionType   :: Maybe ProjectionType
    } deriving (Eq, Read, Show)

-- | 'Projection' smart constructor; the projection type starts out unset.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'pNonKeyAttributes' @::@ 'NonEmpty' 'Text'
--
-- * 'pProjectionType' @::@ 'Maybe' 'ProjectionType'
projection :: NonEmpty Text -- ^ 'pNonKeyAttributes'
           -> Projection
projection attrs = Projection
    { _pNonKeyAttributes = withIso _List1 (const id) attrs
    , _pProjectionType   = Nothing
    }

-- | The non-key attribute names that will be projected into the index.
--
-- For local secondary indexes, the total count of /NonKeyAttributes/, summed
-- across all of the local secondary indexes, must not exceed 20.  Projecting
-- the same attribute into two different indexes counts it as two distinct
-- attributes toward that total.
pNonKeyAttributes :: Lens' Projection (NonEmpty Text)
pNonKeyAttributes = field . _List1
  where
    field = lens _pNonKeyAttributes (\s a -> s { _pNonKeyAttributes = a })

-- | The set of attributes that are projected into the index:
--
-- 'KEYS_ONLY' - only the index and primary keys are projected.
--
-- 'INCLUDE' - only the table attributes listed in /NonKeyAttributes/ are
-- projected.
--
-- 'ALL' - every table attribute is projected.
pProjectionType :: Lens' Projection (Maybe ProjectionType)
pProjectionType = lens _pProjectionType (\s t -> s { _pProjectionType = t })

instance FromJSON Projection where
    parseJSON = withObject "Projection" $ \o -> do
        attrs <- o .:  "NonKeyAttributes"
        ptype <- o .:? "ProjectionType"
        return (Projection attrs ptype)

instance ToJSON Projection where
    toJSON p = object
        [ "ProjectionType"   .= _pProjectionType p
        , "NonKeyAttributes" .= _pNonKeyAttributes p
        ]
-- | Parameters for adding a new global secondary index to an existing table.
data CreateGlobalSecondaryIndexAction = CreateGlobalSecondaryIndexAction
    { _cgsiaIndexName             :: Text
    , _cgsiaKeySchema             :: List1 "KeySchema" KeySchemaElement
    , _cgsiaProjection            :: Projection
    , _cgsiaProvisionedThroughput :: ProvisionedThroughput
    } deriving (Eq, Read, Show)

-- | 'CreateGlobalSecondaryIndexAction' smart constructor.
createGlobalSecondaryIndexAction :: Text                      -- ^ 'cgsiaIndexName'
                                 -> NonEmpty KeySchemaElement -- ^ 'cgsiaKeySchema'
                                 -> Projection                -- ^ 'cgsiaProjection'
                                 -> ProvisionedThroughput     -- ^ 'cgsiaProvisionedThroughput'
                                 -> CreateGlobalSecondaryIndexAction
createGlobalSecondaryIndexAction name schema proj tput =
    CreateGlobalSecondaryIndexAction
        { _cgsiaIndexName             = name
        , _cgsiaKeySchema             = withIso _List1 (const id) schema
        , _cgsiaProjection            = proj
        , _cgsiaProvisionedThroughput = tput
        }

-- | The name of the global secondary index to be created.
cgsiaIndexName :: Lens' CreateGlobalSecondaryIndexAction Text
cgsiaIndexName = lens _cgsiaIndexName (\s n -> s { _cgsiaIndexName = n })

-- | The key schema for the global secondary index.
cgsiaKeySchema :: Lens' CreateGlobalSecondaryIndexAction (NonEmpty KeySchemaElement)
cgsiaKeySchema = field . _List1
  where
    field = lens _cgsiaKeySchema (\s k -> s { _cgsiaKeySchema = k })

-- | The attribute projection for the new index.
cgsiaProjection :: Lens' CreateGlobalSecondaryIndexAction Projection
cgsiaProjection = lens _cgsiaProjection (\s p -> s { _cgsiaProjection = p })

-- | The provisioned throughput settings for the new index.
cgsiaProvisionedThroughput :: Lens' CreateGlobalSecondaryIndexAction ProvisionedThroughput
cgsiaProvisionedThroughput =
    lens _cgsiaProvisionedThroughput
         (\s t -> s { _cgsiaProvisionedThroughput = t })

instance FromJSON CreateGlobalSecondaryIndexAction where
    parseJSON = withObject "CreateGlobalSecondaryIndexAction" $ \o -> do
        name   <- o .: "IndexName"
        schema <- o .: "KeySchema"
        proj   <- o .: "Projection"
        tput   <- o .: "ProvisionedThroughput"
        return (CreateGlobalSecondaryIndexAction name schema proj tput)

instance ToJSON CreateGlobalSecondaryIndexAction where
    toJSON a = object
        [ "IndexName"             .= _cgsiaIndexName a
        , "KeySchema"             .= _cgsiaKeySchema a
        , "Projection"            .= _cgsiaProjection a
        , "ProvisionedThroughput" .= _cgsiaProvisionedThroughput a
        ]
-- | Which attributes a query or scan should return.
data Select
    = AllAttributes          -- ^ ALL_ATTRIBUTES
    | AllProjectedAttributes -- ^ ALL_PROJECTED_ATTRIBUTES
    | Count                  -- ^ COUNT
    | SpecificAttributes     -- ^ SPECIFIC_ATTRIBUTES
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable Select

instance FromText Select where
    parser = do
        t <- takeLowerText
        case t of
            "all_attributes"           -> pure AllAttributes
            "all_projected_attributes" -> pure AllProjectedAttributes
            "count"                    -> pure Count
            "specific_attributes"      -> pure SpecificAttributes
            e                          -> fail $
                "Failure parsing Select from " ++ show e

instance ToText Select where
    toText AllAttributes          = "ALL_ATTRIBUTES"
    toText AllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES"
    toText Count                  = "COUNT"
    toText SpecificAttributes     = "SPECIFIC_ATTRIBUTES"

instance ToByteString Select
instance ToHeader     Select
instance ToQuery      Select

instance FromJSON Select where
    parseJSON = parseJSONText "Select"

instance ToJSON Select where
    toJSON = toJSONText
-- | A single element of a key schema: an attribute name paired with its key
-- type.
data KeySchemaElement = KeySchemaElement
    { _kseAttributeName :: Text
    , _kseKeyType       :: KeyType
    } deriving (Eq, Read, Show)

-- | 'KeySchemaElement' smart constructor.
keySchemaElement :: Text    -- ^ 'kseAttributeName'
                 -> KeyType -- ^ 'kseKeyType'
                 -> KeySchemaElement
keySchemaElement = KeySchemaElement

-- | The name of a key attribute.
kseAttributeName :: Lens' KeySchemaElement Text
kseAttributeName = lens _kseAttributeName (\s n -> s { _kseAttributeName = n })

-- | The role this attribute plays in the key; see 'KeyType'.  (Key types are
-- described elsewhere in this module as 'HASH' or 'RANGE'.)
kseKeyType :: Lens' KeySchemaElement KeyType
kseKeyType = lens _kseKeyType (\s k -> s { _kseKeyType = k })

instance FromJSON KeySchemaElement where
    parseJSON = withObject "KeySchemaElement" $ \o -> do
        name <- o .: "AttributeName"
        kt   <- o .: "KeyType"
        return (KeySchemaElement name kt)

instance ToJSON KeySchemaElement where
    toJSON kse = object
        [ "AttributeName" .= _kseAttributeName kse
        , "KeyType"       .= _kseKeyType kse
        ]
-- | Identifies a global secondary index to remove from a table.
--
-- NOTE: 'Semigroup' is derived alongside 'Monoid' for consistency with the
-- sibling newtypes 'DeleteRequest' and 'PutRequest'; on GHC >= 8.4 a 'Monoid'
-- instance requires a 'Semigroup' superclass instance, so deriving 'Monoid'
-- alone no longer compiles.
newtype DeleteGlobalSecondaryIndexAction = DeleteGlobalSecondaryIndexAction
    { _dgsiaIndexName :: Text
    } deriving (Eq, Ord, Read, Show, Monoid, Semigroup, IsString)

-- | 'DeleteGlobalSecondaryIndexAction' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'dgsiaIndexName' @::@ 'Text'
--
deleteGlobalSecondaryIndexAction :: Text -- ^ 'dgsiaIndexName'
                                 -> DeleteGlobalSecondaryIndexAction
deleteGlobalSecondaryIndexAction p1 = DeleteGlobalSecondaryIndexAction
    { _dgsiaIndexName = p1
    }

-- | The name of the global secondary index to be deleted.
dgsiaIndexName :: Lens' DeleteGlobalSecondaryIndexAction Text
dgsiaIndexName = lens _dgsiaIndexName (\s a -> s { _dgsiaIndexName = a })

instance FromJSON DeleteGlobalSecondaryIndexAction where
    parseJSON = withObject "DeleteGlobalSecondaryIndexAction" $ \o -> DeleteGlobalSecondaryIndexAction
        <$> o .: "IndexName"

instance ToJSON DeleteGlobalSecondaryIndexAction where
    toJSON DeleteGlobalSecondaryIndexAction{..} = object
        [ "IndexName" .= _dgsiaIndexName
        ]
-- | A request to delete a single item, identified by its primary key.
newtype DeleteRequest = DeleteRequest
    { _dKey :: Map Text AttributeValue
    } deriving (Eq, Read, Show, Monoid, Semigroup)

-- | Build a 'DeleteRequest' with an empty key map.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'dKey' @::@ 'HashMap' 'Text' 'AttributeValue'
deleteRequest :: DeleteRequest
deleteRequest = DeleteRequest mempty

-- | The primary key of the item to delete, as a map from attribute name to
-- attribute value.  All of the table's primary key attributes must be
-- specified, and their data types must match those of the table's key schema.
dKey :: Lens' DeleteRequest (HashMap Text AttributeValue)
dKey = field . _Map
  where
    field = lens _dKey (\s k -> s { _dKey = k })

instance FromJSON DeleteRequest where
    parseJSON = withObject "DeleteRequest" $ \o ->
        fmap DeleteRequest (o .:? "Key" .!= mempty)

instance ToJSON DeleteRequest where
    toJSON r = object
        [ "Key" .= _dKey r
        ]
-- | New provisioned throughput settings for an existing global secondary
-- index.
data UpdateGlobalSecondaryIndexAction = UpdateGlobalSecondaryIndexAction
    { _ugsiaIndexName             :: Text
    , _ugsiaProvisionedThroughput :: ProvisionedThroughput
    } deriving (Eq, Read, Show)

-- | 'UpdateGlobalSecondaryIndexAction' smart constructor.
updateGlobalSecondaryIndexAction :: Text                  -- ^ 'ugsiaIndexName'
                                 -> ProvisionedThroughput -- ^ 'ugsiaProvisionedThroughput'
                                 -> UpdateGlobalSecondaryIndexAction
updateGlobalSecondaryIndexAction = UpdateGlobalSecondaryIndexAction

-- | The name of the global secondary index to be updated.
ugsiaIndexName :: Lens' UpdateGlobalSecondaryIndexAction Text
ugsiaIndexName = lens _ugsiaIndexName (\s n -> s { _ugsiaIndexName = n })

-- | The new provisioned throughput to apply to the index.
ugsiaProvisionedThroughput :: Lens' UpdateGlobalSecondaryIndexAction ProvisionedThroughput
ugsiaProvisionedThroughput =
    lens _ugsiaProvisionedThroughput
         (\s t -> s { _ugsiaProvisionedThroughput = t })

instance FromJSON UpdateGlobalSecondaryIndexAction where
    parseJSON = withObject "UpdateGlobalSecondaryIndexAction" $ \o -> do
        name <- o .: "IndexName"
        tput <- o .: "ProvisionedThroughput"
        return (UpdateGlobalSecondaryIndexAction name tput)

instance ToJSON UpdateGlobalSecondaryIndexAction where
    toJSON u = object
        [ "IndexName"             .= _ugsiaIndexName u
        , "ProvisionedThroughput" .= _ugsiaProvisionedThroughput u
        ]
-- | A request to write a single item.
newtype PutRequest = PutRequest
    { _pItem :: Map Text AttributeValue
    } deriving (Eq, Read, Show, Monoid, Semigroup)

-- | Build a 'PutRequest' with an empty item map.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'pItem' @::@ 'HashMap' 'Text' 'AttributeValue'
putRequest :: PutRequest
putRequest = PutRequest mempty

-- | A map of attribute name to attribute values, representing the primary key
-- of an item to be processed by /PutItem/.  All of the table's primary key
-- attributes must be specified, and their data types must match those of the
-- table's key schema.  If any attributes present in the item are part of an
-- index key schema for the table, their types must match the index key schema.
pItem :: Lens' PutRequest (HashMap Text AttributeValue)
pItem = field . _Map
  where
    field = lens _pItem (\s i -> s { _pItem = i })

instance FromJSON PutRequest where
    parseJSON = withObject "PutRequest" $ \o ->
        fmap PutRequest (o .:? "Item" .!= mempty)

instance ToJSON PutRequest where
    toJSON r = object
        [ "Item" .= _pItem r
        ]
-- | A comparison to evaluate against an attribute.
data Condition = Condition
    { _cAttributeValueList :: List "AttributeValueList" AttributeValue
    , _cComparisonOperator :: ComparisonOperator
    } deriving (Eq, Read, Show)

-- | 'Condition' smart constructor; the value list starts out empty.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'cAttributeValueList' @::@ ['AttributeValue']
--
-- * 'cComparisonOperator' @::@ 'ComparisonOperator'
condition :: ComparisonOperator -- ^ 'cComparisonOperator'
          -> Condition
condition op = Condition
    { _cAttributeValueList = mempty
    , _cComparisonOperator = op
    }
-- | One or more values to evaluate against the supplied attribute; how many
-- the list needs depends on the /ComparisonOperator/ being used.
--
-- For type Number, value comparisons are numeric.
--
-- String comparisons for greater than, equals, or less than follow ASCII
-- character code order — for example, 'a' is greater than 'A', and 'a' is
-- greater than 'B'.  For a list of code values, see
-- <http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters>.
--
-- For Binary, DynamoDB treats each byte of the binary data as unsigned when
-- comparing binary values.
cAttributeValueList :: Lens' Condition [AttributeValue]
cAttributeValueList = field . _List
  where
    field = lens _cAttributeValueList
                 (\s vs -> s { _cAttributeValueList = vs })
-- | A comparator for evaluating attributes. For example, equals, greater than,
-- less than, etc.
--
-- The following comparison operators are available:
--
-- 'EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |BEGINS_WITH | IN | BETWEEN'
--
-- The following are descriptions of each comparison operator.
--
-- 'EQ' : Equal. 'EQ' is supported for all datatypes, including lists and maps.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, Binary, String Set, Number Set, or Binary Set. If an item
-- contains an /AttributeValue/ element of a different type than the one provided
-- in the request, the value does not match. For example, '{"S":"6"}' does not
-- equal '{"N":"6"}'. Also, '{"N":"6"}' does not equal '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'NE' : Not equal. 'NE' is supported for all datatypes, including lists and
-- maps.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ of type String,
-- Number, Binary, String Set, Number Set, or Binary Set. If an item contains an /AttributeValue/ of a different type than the one provided in the request, the
-- value does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not equal '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'LE' : Less than or equal.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'LT' : Less than.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ of type String,
-- Number, or Binary (not a set type). If an item contains an /AttributeValue/
-- element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'GE' : Greater than or equal.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'GT' : Greater than.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not equal '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'.
--
--
--
-- 'NOT_NULL' : The attribute exists. 'NOT_NULL' is supported for all datatypes,
-- including lists and maps.
--
-- This operator tests for the existence of an attribute, not its data type. If
-- the data type of attribute "'a'" is null, and you evaluate it using 'NOT_NULL',
-- the result is a Boolean /true/. This result is because the attribute "'a'"
-- exists; its data type is not relevant to the 'NOT_NULL' comparison operator.
--
-- 'NULL' : The attribute does not exist. 'NULL' is supported for all datatypes,
-- including lists and maps.
--
-- This operator tests for the nonexistence of an attribute, not its data type.
-- If the data type of attribute "'a'" is null, and you evaluate it using 'NULL',
-- the result is a Boolean /false/. This is because the attribute "'a'" exists; its
-- data type is not relevant to the 'NULL' comparison operator.
--
-- 'CONTAINS' : Checks for a subsequence, or value in a set.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If the target attribute of the
-- comparison is of type String, then the operator checks for a substring match.
-- If the target attribute of the comparison is of type Binary, then the
-- operator looks for a subsequence of the target that matches the input. If the
-- target attribute of the comparison is a set ("'SS'", "'NS'", or "'BS'"), then the
-- operator evaluates to true if it finds an exact match with any member of the
-- set.
--
-- CONTAINS is supported for lists: When evaluating "'a CONTAINS b'", "'a'" can be
-- a list; however, "'b'" cannot be a set, a map, or a list.
--
-- 'NOT_CONTAINS' : Checks for absence of a subsequence, or absence of a value
-- in a set.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ element of type
-- String, Number, or Binary (not a set type). If the target attribute of the
-- comparison is a String, then the operator checks for the absence of a
-- substring match. If the target attribute of the comparison is Binary, then
-- the operator checks for the absence of a subsequence of the target that
-- matches the input. If the target attribute of the comparison is a set ("'SS'", "'NS'", or "'BS'"), then the operator evaluates to true if it /does not/ find an
-- exact match with any member of the set.
--
-- NOT_CONTAINS is supported for lists: When evaluating "'a NOT CONTAINS b'", "'a'"
-- can be a list; however, "'b'" cannot be a set, a map, or a list.
--
-- 'BEGINS_WITH' : Checks for a prefix.
--
-- /AttributeValueList/ can contain only one /AttributeValue/ of type String or
-- Binary (not a Number or a set type). The target attribute of the comparison
-- must be of type String or Binary (not a Number or a set type).
--
--
--
-- 'IN' : Checks for matching elements within two sets.
--
-- /AttributeValueList/ can contain one or more /AttributeValue/ elements of type
-- String, Number, or Binary (not a set type). These attributes are compared
-- against an existing set type attribute of an item. If any elements of the
-- input set are present in the item attribute, the expression evaluates to true.
--
-- 'BETWEEN' : Greater than or equal to the first value, and less than or equal
-- to the second value.
--
-- /AttributeValueList/ must contain two /AttributeValue/ elements of the same
-- type, either String, Number, or Binary (not a set type). A target attribute
-- matches if the target value is greater than, or equal to, the first element
-- and less than, or equal to, the second element. If an item contains an /AttributeValue/ element of a different type than the one provided in the request, the value
-- does not match. For example, '{"S":"6"}' does not compare to '{"N":"6"}'. Also, '{"N":"6"}' does not compare to '{"NS":["6", "2", "1"]}'
--
-- For usage examples of /AttributeValueList/ and /ComparisonOperator/, see <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html Legacy Conditional Parameters> in the /Amazon DynamoDB Developer Guide/.
cComparisonOperator :: Lens' Condition ComparisonOperator
cComparisonOperator = lens _cComparisonOperator
                           (\s op -> s { _cComparisonOperator = op })
instance FromJSON Condition where
    parseJSON = withObject "Condition" $ \o -> do
        vals <- o .:? "AttributeValueList" .!= mempty
        op   <- o .:  "ComparisonOperator"
        return (Condition vals op)

instance ToJSON Condition where
    toJSON c = object
        [ "AttributeValueList" .= _cAttributeValueList c
        , "ComparisonOperator" .= _cComparisonOperator c
        ]
-- | Logical connective between multiple conditions.
data ConditionalOperator
    = And -- ^ AND
    | Or  -- ^ OR
      deriving (Eq, Ord, Read, Show, Generic, Enum)

instance Hashable ConditionalOperator

instance FromText ConditionalOperator where
    parser = do
        t <- takeLowerText
        case t of
            "and" -> pure And
            "or"  -> pure Or
            e     -> fail $
                "Failure parsing ConditionalOperator from " ++ show e

instance ToText ConditionalOperator where
    toText And = "AND"
    toText Or  = "OR"

instance ToByteString ConditionalOperator
instance ToHeader     ConditionalOperator
instance ToQuery      ConditionalOperator

instance FromJSON ConditionalOperator where
    parseJSON = parseJSONText "ConditionalOperator"

instance ToJSON ConditionalOperator where
    toJSON = toJSONText
-- | A create, delete, or update action to apply to one of a table's global
-- secondary indexes.
data GlobalSecondaryIndexUpdate = GlobalSecondaryIndexUpdate
    { _gsiuCreate :: Maybe CreateGlobalSecondaryIndexAction
    , _gsiuDelete :: Maybe DeleteGlobalSecondaryIndexAction
    , _gsiuUpdate :: Maybe UpdateGlobalSecondaryIndexAction
    } deriving (Eq, Read, Show)

-- | Build a 'GlobalSecondaryIndexUpdate' with no action selected.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'gsiuCreate' @::@ 'Maybe' 'CreateGlobalSecondaryIndexAction'
--
-- * 'gsiuDelete' @::@ 'Maybe' 'DeleteGlobalSecondaryIndexAction'
--
-- * 'gsiuUpdate' @::@ 'Maybe' 'UpdateGlobalSecondaryIndexAction'
globalSecondaryIndexUpdate :: GlobalSecondaryIndexUpdate
globalSecondaryIndexUpdate =
    GlobalSecondaryIndexUpdate Nothing Nothing Nothing

-- | The parameters required for creating a global secondary index on an
-- existing table: IndexName, KeySchema, AttributeDefinitions, Projection and
-- ProvisionedThroughput.
gsiuCreate :: Lens' GlobalSecondaryIndexUpdate (Maybe CreateGlobalSecondaryIndexAction)
gsiuCreate = lens _gsiuCreate (\s c -> s { _gsiuCreate = c })

-- | The name of an existing global secondary index to be removed.
gsiuDelete :: Lens' GlobalSecondaryIndexUpdate (Maybe DeleteGlobalSecondaryIndexAction)
gsiuDelete = lens _gsiuDelete (\s d -> s { _gsiuDelete = d })

-- | The name of an existing global secondary index, along with new
-- provisioned throughput settings to be applied to that index.
gsiuUpdate :: Lens' GlobalSecondaryIndexUpdate (Maybe UpdateGlobalSecondaryIndexAction)
gsiuUpdate = lens _gsiuUpdate (\s u -> s { _gsiuUpdate = u })

instance FromJSON GlobalSecondaryIndexUpdate where
    parseJSON = withObject "GlobalSecondaryIndexUpdate" $ \o -> do
        c <- o .:? "Create"
        d <- o .:? "Delete"
        u <- o .:? "Update"
        return (GlobalSecondaryIndexUpdate c d u)

instance ToJSON GlobalSecondaryIndexUpdate where
    toJSON g = object
        [ "Update" .= _gsiuUpdate g
        , "Create" .= _gsiuCreate g
        , "Delete" .= _gsiuDelete g
        ]
-- (dataset artifact: a repository-metadata row — repo kim/amazonka, path
-- amazonka-dynamodb/gen/Network/AWS/DynamoDB/Types.hs, license mpl-2.0 —
-- was embedded here; converted to a comment so the file remains parseable)
{-# LANGUAGE BangPatterns, FlexibleContexts, UnboxedTuples #-}
-- |
-- Module : Statistics.Sample.KernelDensity
-- Copyright : (c) 2011 Bryan O'Sullivan
-- License : BSD3
--
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : portable
--
-- Kernel density estimation. This module provides a fast, robust,
-- non-parametric way to estimate the probability density function of
-- a sample.
--
-- This estimator does not use the commonly employed \"Gaussian rule
-- of thumb\". As a result, it outperforms many plug-in methods on
-- multimodal samples with widely separated modes.
module Statistics.Sample.KernelDensity
(
-- * Estimation functions
kde
, kde_
-- * References
-- $references
) where
import Data.Default.Class
import Numeric.MathFunctions.Constants (m_sqrt_2_pi)
import Numeric.RootFinding (fromRoot, ridders, RiddersParam(..), Tolerance(..))
import Prelude hiding (const, min, max, sum)
import Statistics.Function (minMax, nextHighestPowerOfTwo)
import Statistics.Sample.Histogram (histogram_)
import Statistics.Sample.Internal (sum)
import Statistics.Transform (CD, dct, idct)
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Unboxed as U
import qualified Data.Vector as V
-- | Gaussian kernel density estimator for one-dimensional data, using
-- the method of Botev et al.
--
-- The result is a pair of vectors, containing:
--
-- * The coordinates of each mesh point. The mesh interval is chosen
-- to be 20% larger than the range of the sample. (To specify the
-- mesh interval, use 'kde_'.)
--
-- * Density estimates at each mesh point.
kde :: (G.Vector v CD, G.Vector v Double, G.Vector v Int)
    => Int
    -- ^ The number of mesh points to use in the uniform discretization
    --   of the interval @(min,max)@.  If this value is not a power of
    --   two, then it is rounded up to the next power of two.
    -> v Double -> (v Double, v Double)
kde n0 xs = kde_ n0 (lo - pad) (hi + pad) xs
  where
    (lo, hi) = minMax xs
    -- Widen the sample range by 10% on each side; use a unit extent for
    -- degenerate samples (fewer than two points, or all points equal).
    pad = extent / 10
    extent
      | G.length xs <= 1 || lo == hi = 1
      | otherwise                    = hi - lo
{-# INLINABLE kde #-}
{-# SPECIALIZE kde :: Int -> U.Vector Double -> (U.Vector Double, U.Vector Double) #-}
{-# SPECIALIZE kde :: Int -> V.Vector Double -> (V.Vector Double, V.Vector Double) #-}
-- | Gaussian kernel density estimator for one-dimensional data, using
-- the method of Botev et al.
--
-- The result is a pair of vectors, containing:
--
-- * The coordinates of each mesh point.
--
-- * Density estimates at each mesh point.
kde_ :: (G.Vector v CD, G.Vector v Double, G.Vector v Int)
     => Int
     -- ^ The number of mesh points to use in the uniform discretization
     -- of the interval @(min,max)@.  If this value is not a power of
     -- two, then it is rounded up to the next power of two.
     -> Double
     -- ^ Lower bound (@min@) of the mesh range.
     -> Double
     -- ^ Upper bound (@max@) of the mesh range.
     -> v Double
     -> (v Double, v Double)
-- NOTE: 'min', 'max', 'sum' and 'const' below are local names; the
-- Prelude versions are hidden by the module's import list.
kde_ n0 min max xs
  | G.null xs = error "Statistics.KernelDensity.kde: empty sample"
  | n0 <= 1   = error "Statistics.KernelDensity.kde: invalid number of points"
  | otherwise = (mesh, density)
  where
    -- Uniform grid of 'ni' points spanning [min, max].
    mesh = G.generate ni $ \z -> min + (d * fromIntegral z)
        where d = r / (n-1)
    -- Inverse DCT of the transformed histogram, each coefficient
    -- attenuated by a Gaussian factor parameterized by t_star.
    density = G.map (/(2 * r)) . idct $ G.zipWith f a (G.enumFromTo 0 (n-1))
        where f b z = b * exp (sqr z * sqr pi * t_star * (-0.5))
    !n = fromIntegral ni
    !ni = nextHighestPowerOfTwo n0
    !r = max - min
    -- DCT of the sample's normalized histogram over the mesh range.
    a = dct . G.map (/ sum h) $ h
        where h = G.map (/ len) $ histogram_ ni min max xs
    !len = fromIntegral (G.length xs)
    -- Fixed-point bandwidth parameter t*, located with Ridders' method on
    -- (0, 0.1).  'fromRoot' supplies the 0.28*len**(-0.4) value --
    -- presumably the fallback when the root search fails; confirm against
    -- Numeric.RootFinding.
    !t_star = fromRoot (0.28 * len ** (-0.4)) . ridders def{ riddersTol = AbsTol 1e-14 } (0,0.1)
              $ \x -> x - (len * (2 * sqrt pi) * go 6 (f 7 x)) ** (-0.4)
      where
        f q t = 2 * pi ** (q*2) * sum (G.zipWith g iv a2v)
          where g i a2 = i ** q * a2 * exp ((-i) * sqr pi * t)
                a2v = G.map (sqr . (*0.5)) $ G.tail a
                iv = G.map sqr $ G.enumFromTo 1 (n-1)
        go s !h | s == 1 = h
                | otherwise = go (s-1) (f s time)
          where time = (2 * const * k0 / len / h) ** (2 / (3 + 2 * s))
                const = (1 + 0.5 ** (s+0.5)) / 3
                k0 = U.product (G.enumFromThenTo 1 3 (2*s-1)) / m_sqrt_2_pi
    sqr x = x * x
{-# INLINABLE kde_ #-}
-- NOTE(review): "SPECIAlIZE" has a lowercase L; GHC pragma keywords are
-- case-insensitive so this still works, but the spelling is nonstandard.
{-# SPECIAlIZE kde_ :: Int -> Double -> Double -> U.Vector Double -> (U.Vector Double, U.Vector Double) #-}
{-# SPECIAlIZE kde_ :: Int -> Double -> Double -> V.Vector Double -> (V.Vector Double, V.Vector Double) #-}
-- $references
--
-- Botev. Z.I., Grotowski J.F., Kroese D.P. (2010). Kernel density
-- estimation via diffusion. /Annals of Statistics/
-- 38(5):2916–2957. <http://arxiv.org/pdf/1011.2602>
| bos/statistics | Statistics/Sample/KernelDensity.hs | bsd-2-clause | 4,936 | 0 | 17 | 1,256 | 1,194 | 655 | 539 | 60 | 1 |
{-# LANGUAGE CPP,
FlexibleContexts,
FlexibleInstances,
UndecidableInstances,
TypeFamilies #-}
module Language.Hakaru.Runtime.CmdLine where
import qualified Data.Vector.Unboxed as U
import qualified System.Random.MWC as MWC
import Control.Monad (liftM, ap, forever)
#if __GLASGOW_HASKELL__ < 710
import Data.Functor
import Control.Applicative (Applicative(..))
#endif
-- | A sampling computation: given an MWC random generator, produce a
-- value, or 'Nothing' when the sample is rejected.
newtype Measure a = Measure { unMeasure :: MWC.GenIO -> IO (Maybe a) }
instance Functor Measure where
  fmap = liftM
  {-# INLINE fmap #-}
instance Applicative Measure where
  -- 'pure' ignores the generator and always succeeds.
  pure x = Measure $ \_ -> return (Just x)
  {-# INLINE pure #-}
  (<*>) = ap
  {-# INLINE (<*>) #-}
instance Monad Measure where
  return = pure
  {-# INLINE return #-}
  -- NOTE(review): the partial @Just x <-@ match means a 'Nothing' result
  -- from @m@ raises a pattern-match failure in IO instead of propagating
  -- 'Nothing' -- confirm this is the intended rejection behaviour.
  m >>= f = Measure $ \g -> do
    Just x <- unMeasure m g
    unMeasure (f x) g
  {-# INLINE (>>=) #-}
-- | Wrap a sampler that always succeeds as a 'Measure'.
makeMeasure :: (MWC.GenIO -> IO a) -> Measure a
makeMeasure f = Measure $ \g -> Just <$> f g
{-# INLINE makeMeasure #-}
-- A class of types that can be parsed from command line arguments
class Parseable a where
  parse :: String -> IO a
-- NOTE(review): the Int/Double/pair instances use 'read', which throws
-- on malformed input -- acceptable for a CLI, but worth confirming.
instance Parseable Int where
  parse = return . read
instance Parseable Double where
  parse = return . read
-- A vector argument names a file; each line of the file is parsed as
-- one element.
instance (U.Unbox a, Parseable a) => Parseable (U.Vector a) where
  parse s = U.fromList <$> ((mapM parse) =<< (lines <$> readFile s))
instance (Read a, Read b) => Parseable (a, b) where
  parse = return . read
{- makeMain recurs down the function type at the type level while, at the
   term level, building up a chain of argument parses and partial
   applications of the function.
-}
class MakeMain p where
  makeMain :: p -> [String] -> IO ()
-- Base case: a plain showable result is printed once.
instance {-# OVERLAPPABLE #-}
         Show a => MakeMain a where
  makeMain p _ = print p
-- A Measure is sampled forever, printing each successful draw.
instance Show a => MakeMain (Measure a) where
  makeMain p _ = MWC.createSystemRandom >>= \gen ->
                   forever $ do
                     ms <- unMeasure p gen
                     case ms of
                       Nothing -> return ()
                       Just s -> print s
-- Function case: parse one command-line argument, apply, recurse.
instance (Parseable a, MakeMain b)
         => MakeMain (a -> b) where
  makeMain p (a:as) = do a' <- parse a
                         makeMain (p a') as
  makeMain _ [] = error "not enough arguments"
| zachsully/hakaru | haskell/Language/Hakaru/Runtime/CmdLine.hs | bsd-3-clause | 2,391 | 0 | 15 | 767 | 661 | 349 | 312 | 57 | 1 |
{-# LANGUAGE RecordWildCards, ViewPatterns #-}
module Development.Bake.Build(
ovenIncremental, incrementalDone, incrementalStart
) where
import Development.Bake.Core.Type
import Development.Shake.Command
import Control.Monad.Extra
import Control.Applicative
import System.FilePath
import Control.Exception.Extra
import System.Directory
import General.Extra
import Prelude
-- Files involved:
-- ../bake-incremental.txt, stores the directory name of the most recent successful increment
-- .bake.incremental exists if you have done an increment yourself, or copied from someone who has
-- we always use the most recent increment to build onwards from
-- | This requires a version of @cp@. On Windows, you can get that here:
-- <http://gnuwin32.sourceforge.net/packages/coreutils.htm>
ovenIncremental :: Oven state patch test -> Oven state patch test
ovenIncremental oven@Oven{..} = oven{ovenPrepare = \s ps -> do incPrepare s ps; ovenPrepare s ps}
    where
        -- Before the normal prepare step, seed the working directory from
        -- the most recent successful increment named in
        -- ../bake-incremental.txt.  Wrapped in 'ignore' (from
        -- Control.Exception.Extra -- presumably swallows any failure, so
        -- a broken cache falls back to a clean prepare; confirm).
        incPrepare s ps = ignore $ do
            -- if i have already been incremental'd (via copy, or via completion) don't do anything
            unlessM (doesFileExist ".bake.incremental") $ do
                src <- takeWhile (/= '\n') <$> readFile "../bake-incremental.txt"
                -- Only copy from a directory that was itself incremented.
                whenM (doesFileExist $ ".." </> src </> ".bake.incremental") $ do
                    putStrLn $ "Preparing by copying from " ++ src
                    timed "copying for ovenIncremental" $
                        unit $ cmd "cp --preserve=timestamps --recursive --no-target-directory" ("../" ++ src) "."
-- | Mark the current working directory as having been incremented
-- (creates the @.bake.incremental@ marker file).
incrementalStart :: IO ()
incrementalStart =
    writeFile ".bake.incremental" ""

-- | Mark this directory as incremented and record its name in the parent
-- directory as the most recent successful increment.
incrementalDone :: IO ()
incrementalDone = do
    incrementalStart
    dir <- getCurrentDirectory
    writeFile "../bake-incremental.txt" (unlines [takeFileName dir])
| Pitometsu/bake | src/Development/Bake/Build.hs | bsd-3-clause | 1,821 | 9 | 18 | 378 | 323 | 172 | 151 | 29 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module FormatMultinom ( formatMultinom
, formatMultinoms
) where
import Data.Foldable
import Data.Monoid
import qualified Data.Text.Lazy.IO as TL
import qualified Data.Text.Lazy.Builder as TB
import Data.Text.Lazy.Builder.RealFloat
import qualified Data.Map as M
import BayesStack.DirMulti
-- | Render a multinomial distribution, one element per line
-- (tab, element, tab, probability in exponent notation), optionally
-- truncated to the @limit@ most probable elements.
formatMultinom :: (Real w, Ord a, Enum a)
               => (a -> TB.Builder) -> Maybe Int -> Multinom w a -> TB.Builder
formatMultinom showElem limit =
    foldMap line . truncateTo . toList . decProbabilities
  where
    line (p,x) =
        "\t" <> showElem x <> "\t" <> formatRealFloat Exponent (Just 3) p <> "\n"
    truncateTo = maybe id take limit
-- | Render a map of multinomials: each key on its own line followed by
-- its formatted distribution and a blank separator line.
formatMultinoms :: (Real w, Ord k, Ord a, Enum a)
                => (k -> TB.Builder) -> (a -> TB.Builder) -> Maybe Int
                -> M.Map k (Multinom w a) -> TB.Builder
formatMultinoms showKey showElem limit dists =
    foldMap entry (M.assocs dists)
  where
    entry (k,v) = showKey k <> "\n"
                  <> formatMultinom showElem limit v <> "\n"
| beni55/bayes-stack | network-topic-models/FormatMultinom.hs | bsd-3-clause | 1,101 | 0 | 12 | 335 | 351 | 190 | 161 | 22 | 1 |
{-# LANGUAGE GeneralizedNewtypeDeriving, DeriveDataTypeable, StandaloneDeriving, OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-----------------------------------------------------------------------------
-- |
-- Module : Distribution.Server.Framework.ResponseContentTypes
-- Copyright : (c) David Himmelstrup 2008
-- Duncan Coutts 2008
-- License : BSD-like
--
-- Maintainer : duncan@community.haskell.org
-- Stability : provisional
-- Portability : portable
--
-- Types for various kinds of resources we serve, xml, package tarballs etc.
-----------------------------------------------------------------------------
module Distribution.Server.Framework.ResponseContentTypes where
import Distribution.Server.Framework.BlobStorage
( BlobId, blobMd5 )
import Distribution.Server.Framework.MemSize
import Distribution.Server.Framework.Instances ()
import Distribution.Server.Util.Parse (packUTF8)
import Happstack.Server
( ToMessage(..), Response(..), RsFlags(..), Length(NoContentLength), nullRsFlags, mkHeaders
, noContentLength )
import qualified Data.ByteString.Lazy as BS.Lazy
import Data.Digest.Pure.MD5 (MD5Digest)
import Text.RSS (RSS)
import qualified Text.RSS as RSS (rssToXML, showXML)
import qualified Text.XHtml.Strict as XHtml (Html, showHtml)
import qualified Data.Aeson as Aeson (Value, encode)
import Data.Time.Clock (UTCTime)
import qualified Data.Time.Format as Time (formatTime)
import System.Locale (defaultTimeLocale)
import Text.CSV (printCSV, CSV)
import Control.DeepSeq
-- | The package index tarball plus its length, MD5 digest and timestamp.
data IndexTarball = IndexTarball !BS.Lazy.ByteString !Int !MD5Digest !UTCTime
instance ToMessage IndexTarball where
    toResponse (IndexTarball bs len md5 time) = mkResponseLen bs len
        [ ("Content-Type", "application/x-gzip")
        , ("Content-MD5", md5str)
        , ("Last-modified", formatLastModifiedTime time)
        ]
      where md5str = show md5
instance NFData IndexTarball where
    rnf (IndexTarball a b c d) = rnf (a,b,c,d)
instance MemSize IndexTarball where
    memSize (IndexTarball a b c d) = memSize4 a b c d
-- | A package source tarball; the MD5 header comes from its blob id.
data PackageTarball = PackageTarball BS.Lazy.ByteString BlobId UTCTime
instance ToMessage PackageTarball where
    toResponse (PackageTarball bs blobid time) = mkResponse bs
        [ ("Content-Type", "application/x-gzip")
        , ("Content-MD5", md5sum)
        , ("Last-modified", formatLastModifiedTime time)
        ]
      where md5sum = blobMd5 blobid
-- | A documentation tarball (plain tar, not gzipped).
data DocTarball = DocTarball BS.Lazy.ByteString BlobId
instance ToMessage DocTarball where
    toResponse (DocTarball bs blobid) = mkResponse bs
        [ ("Content-Type", "application/x-tar")
        , ("Content-MD5", md5sum)
        ]
      where md5sum = blobMd5 blobid
-- | Format a timestamp for the @Last-modified@ header (RFC 822 style).
formatLastModifiedTime :: UTCTime -> String
formatLastModifiedTime = Time.formatTime defaultTimeLocale rfc822DateFormat
  where
    -- HACK! we're using UTC but http requires GMT
    -- hopefully it's ok to just say it's GMT
    rfc822DateFormat = "%a, %d %b %Y %H:%M:%S GMT"
-- Wrapper newtypes giving each served resource its own content type.
newtype OpenSearchXml = OpenSearchXml BS.Lazy.ByteString
instance ToMessage OpenSearchXml where
    toContentType _ = "application/opensearchdescription+xml"
    toMessage (OpenSearchXml bs) = bs
instance ToMessage Aeson.Value where
    toContentType _ = "application/json; charset=utf-8"
    toMessage val = Aeson.encode val
newtype CabalFile = CabalFile BS.Lazy.ByteString
instance ToMessage CabalFile where
    toContentType _ = "text/plain; charset=utf-8"
    toMessage (CabalFile bs) = bs
newtype BuildLog = BuildLog BS.Lazy.ByteString
instance ToMessage BuildLog where
    toContentType _ = "text/plain"
    toMessage (BuildLog bs) = bs
instance ToMessage RSS where
    toContentType _ = "application/rss+xml"
    toMessage = packUTF8 . RSS.showXML . RSS.rssToXML
newtype XHtml = XHtml XHtml.Html
instance ToMessage XHtml where
    toContentType _ = "text/html; charset=utf-8"
    toMessage (XHtml xhtml) = packUTF8 (XHtml.showHtml xhtml)
-- Like XHtml, but skips the Content-Length calculation (for large pages).
newtype LongXHtml = LongXHtml XHtml.Html
instance ToMessage LongXHtml where
    toResponse (LongXHtml xhtml) = noContentLength $ mkResponse
        (packUTF8 (XHtml.showHtml xhtml))
        [("Content-Type", "text/html")]
newtype ExportTarball = ExportTarball BS.Lazy.ByteString
instance ToMessage ExportTarball where
    toResponse (ExportTarball bs)
        = noContentLength $ mkResponse bs
          [("Content-Type", "application/gzip")]
newtype CSVFile = CSVFile CSV
instance ToMessage CSVFile where
    toContentType _ = "text/csv"
    toMessage (CSVFile csv) = packUTF8 (printCSV csv)
-- | Build a 200 response with the given lazy body and extra headers.
mkResponse :: BS.Lazy.ByteString -> [(String, String)] -> Response
mkResponse body headers = Response
    { rsCode      = 200
    , rsHeaders   = mkHeaders headers
    , rsFlags     = nullRsFlags
    , rsBody      = body
    , rsValidator = Nothing
    }

-- | Like 'mkResponse', but with an explicit Content-Length header and
-- automatic length computation suppressed.
mkResponseLen :: BS.Lazy.ByteString -> Int -> [(String, String)] -> Response
mkResponseLen body len headers =
    (mkResponse body (("Content-Length", show len) : headers))
        { rsFlags = nullRsFlags { rsfLength = NoContentLength } }
| haskell-infra/hackage-server | Distribution/Server/Framework/ResponseContentTypes.hs | bsd-3-clause | 5,145 | 0 | 12 | 900 | 1,182 | 679 | 503 | 107 | 1 |
{-# LANGUAGE TypeOperators, RankNTypes #-}
{- $Id: AFRPMiscellany.hs,v 1.4 2003/11/10 21:28:58 antony Exp $
******************************************************************************
* A F R P *
* *
* Module: AFRPMiscellany *
* Purpose: Collection of entities that really should be part *
* of the Haskell 98 prelude or simply have no better *
* home. *
* Authors: Henrik Nilsson and Antony Courtney *
* *
* Copyright (c) Yale University, 2003 *
* *
******************************************************************************
-}
module AFRPMiscellany (
-- Reverse function composition
( # ), -- :: (a -> b) -> (b -> c) -> (a -> c), infixl 9
-- Arrow plumbing aids
dup, -- :: a -> (a,a)
swap, -- :: (a,b) -> (b,a)
-- Maps over lists of pairs
mapFst, -- :: (a -> b) -> [(a,c)] -> [(b,c)]
mapSnd, -- :: (a -> b) -> [(c,a)] -> [(c,b)]
-- Generalized tuple selectors
sel3_1, sel3_2, sel3_3,
sel4_1, sel4_2, sel4_3, sel4_4,
sel5_1, sel5_2, sel5_3, sel5_4, sel5_5,
-- Floating point utilities
fDiv, -- :: (RealFrac a, Integral b) => a -> a -> b
fMod, -- :: RealFrac a => a -> a -> a
fDivMod -- :: (RealFrac a, Integral b) => a -> a -> (b, a)
) where
infixl 9 #
infixl 7 `fDiv`, `fMod`
------------------------------------------------------------------------------
-- Reverse function composition
------------------------------------------------------------------------------
-- | Reverse function composition: @(f # g) x = g (f x)@.
( # ) :: (a -> b) -> (b -> c) -> (a -> c)
( # ) f g x = g (f x)
------------------------------------------------------------------------------
-- Arrow plumbing aids
------------------------------------------------------------------------------
-- | Duplicate a value into both components of a pair.
dup :: a -> (a,a)
dup v = (v, v)

-- | Exchange the components of a pair (lazy pattern: the argument is
-- not forced until a component is demanded).
swap :: (a,b) -> (b,a)
swap ~(a, b) = (b, a)
------------------------------------------------------------------------------
-- Maps over lists of pairs
------------------------------------------------------------------------------
-- | Apply a function to the first component of every pair.
mapFst :: (a -> b) -> [(a,c)] -> [(b,c)]
mapFst f = foldr step []
  where step (a, c) rest = (f a, c) : rest

-- | Apply a function to the second component of every pair.
mapSnd :: (a -> b) -> [(c,a)] -> [(c,b)]
mapSnd f = foldr step []
  where step (c, a) rest = (c, f a) : rest
------------------------------------------------------------------------------
-- Generalized tuple selectors
------------------------------------------------------------------------------
-- Triples
-- | First component of a triple.
sel3_1 :: (t, t1, t2) -> t
sel3_1 (a, _, _) = a

-- | Second component of a triple.
sel3_2 :: (t, t1, t2) -> t1
sel3_2 (_, b, _) = b

-- | Third component of a triple.
sel3_3 :: (t, t1, t2) -> t2
sel3_3 (_, _, c) = c
-- 4-tuples
-- | Component selectors for 4-tuples.
sel4_1 :: (t, t1, t2, t3) -> t
sel4_1 (a, _, _, _) = a

sel4_2 :: (t, t1, t2, t3) -> t1
sel4_2 (_, b, _, _) = b

sel4_3 :: (t, t1, t2, t3) -> t2
sel4_3 (_, _, c, _) = c

sel4_4 :: (t, t1, t2, t3) -> t3
sel4_4 (_, _, _, d) = d
-- 5-tuples
-- | Component selectors for 5-tuples.
sel5_1 :: (t, t1, t2, t3, t4) -> t
sel5_1 (a, _, _, _, _) = a

sel5_2 :: (t, t1, t2, t3, t4) -> t1
sel5_2 (_, b, _, _, _) = b

sel5_3 :: (t, t1, t2, t3, t4) -> t2
sel5_3 (_, _, c, _, _) = c

sel5_4 :: (t, t1, t2, t3, t4) -> t3
sel5_4 (_, _, _, d, _) = d

sel5_5 :: (t, t1, t2, t3, t4) -> t4
sel5_5 (_, _, _, _, e) = e
------------------------------------------------------------------------------
-- Floating point utilities
------------------------------------------------------------------------------
-- Floating-point div and modulo operators.
-- fDiv :: (RealFrac a, Integral b) => a -> a -> b
-- | Floating-point integer division: @floor (x / y)@.
fDiv :: (RealFrac a) => a -> a -> Int
fDiv x y = let (q, _) = fDivMod x y in q

-- | Floating-point modulo; the remainder of 'fDivMod'.
fMod :: RealFrac a => a -> a -> a
fMod x y = let (_, r) = fDivMod x y in r

-- | Simultaneous floating-point div and mod, satisfying
-- @x == fromIntegral q * y + r@ for @(q, r) = fDivMod x y@.
fDivMod :: (RealFrac a) => a -> a -> (Int, a)
fDivMod x y = (quotient, remainder)
  where
    quotient  = floor (x / y)
    remainder = x - fromIntegral quotient * y
| snowmantw/Frag | src/AFRPMiscellany.hs | gpl-2.0 | 4,300 | 0 | 10 | 1,295 | 1,147 | 699 | 448 | 59 | 1 |
fooer input = catMaybes . map Just $ input | mpickering/hlint-refactor | tests/examples/Default71.hs | bsd-3-clause | 42 | 0 | 7 | 8 | 20 | 9 | 11 | 1 | 1 |
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<!-- JavaHelp helpset descriptor (ms-MY locale) for the "Passive Scan
     Rules - Alpha" ZAP extension: declares the TOC, index, full-text
     search and favorites views, backed by map.jhm / toc.xml / index.xml
     and the JavaHelpSearch database. -->
<helpset version="2.0" xml:lang="ms-MY">
<title>Passive Scan Rules - Alpha | ZAP Extension</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Index</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Search</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset>
{-# LANGUAGE CPP #-}
{-# LANGUAGE RankNTypes #-}
-- | The Name Cache
module NameCache
( lookupOrigNameCache
, extendOrigNameCache
, extendNameCache
, initNameCache
, NameCache(..), OrigNameCache
) where
import Module
import Name
import UniqSupply
import TysWiredIn
import Util
import Outputable
import PrelNames

import Data.List        ( foldl' )
#include "HsVersions.h"
{-
Note [The Name Cache]
~~~~~~~~~~~~~~~~~~~~~
The Name Cache makes sure that, during any invocation of GHC, each
External Name "M.x" has one, and only one globally-agreed Unique.
* The first time we come across M.x we make up a Unique and record that
association in the Name Cache.
* When we come across "M.x" again, we look it up in the Name Cache,
and get a hit.
The functions newGlobalBinder, allocateGlobalBinder do the main work.
When you make an External name, you should probably be calling one
of them.
Note [Built-in syntax and the OrigNameCache]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Built-in syntax like tuples and unboxed sums are quite ubiquitous. To lower
their cost we use two tricks,
a. We specially encode tuple and sum Names in interface files' symbol tables
to avoid having to look up their names while loading interface files.
Namely these names are encoded as by their Uniques. We know how to get from
a Unique back to the Name which it represents via the mapping defined in
the SumTupleUniques module. See Note [Symbol table representation of names]
in BinIface and for details.
b. We don't include them in the Orig name cache but instead parse their
OccNames (in isBuiltInOcc_maybe) to avoid bloating the name cache with
them.
Why is the second measure necessary? Good question; afterall, 1) the parser
emits built-in syntax directly as Exact RdrNames, and 2) built-in syntax never
needs to looked-up during interface loading due to (a). It turns out that there
are two reasons why we might look up an Orig RdrName for built-in syntax,
* If you use setRdrNameSpace on an Exact RdrName it may be
turned into an Orig RdrName.
* Template Haskell turns a BuiltInSyntax Name into a TH.NameG
(DsMeta.globalVar), and parses a NameG into an Orig RdrName
(Convert.thRdrName). So, e.g. $(do { reify '(,); ... }) will
go this route (Trac #8954).
-}
-- | Per-module cache of original 'OccName's given 'Name's
type OrigNameCache = ModuleEnv (OccEnv Name)
-- | Look up the cached 'Name' for a (module, occurrence) pair.
-- Built-in syntax (tuples etc.) is answered from 'isBuiltInOcc_maybe'
-- instead of the cache; see Note [Built-in syntax and the OrigNameCache].
lookupOrigNameCache :: OrigNameCache -> Module -> OccName -> Maybe Name
lookupOrigNameCache nc mod occ
  | mod == gHC_TYPES || mod == gHC_PRIM || mod == gHC_TUPLE
  , Just name <- isBuiltInOcc_maybe occ
  =     -- See Note [Known-key names], 3(c) in PrelNames
        -- Special case for tuples; there are too many
        -- of them to pre-populate the original-name cache
    Just name
  | otherwise
  = case lookupModuleEnv nc mod of
        Nothing      -> Nothing
        Just occ_env -> lookupOccEnv occ_env occ
-- | Add an External 'Name' to the cache, keyed by its own module and
-- occurrence name.  Asserts (debug builds) that the name is External.
extendOrigNameCache :: OrigNameCache -> Name -> OrigNameCache
extendOrigNameCache nc name
  = ASSERT2( isExternalName name, ppr name )
    extendNameCache nc (nameModule name) (nameOccName name) name
-- | Record @name@ under @(modu, occ)@: creates a singleton 'OccEnv' for
-- a module seen for the first time, otherwise extends the existing one.
extendNameCache :: OrigNameCache -> Module -> OccName -> Name -> OrigNameCache
extendNameCache nc modu occ name
  = extendModuleEnvWith (\_new old -> extendOccEnv old occ name)
                        nc modu (unitOccEnv occ name)
-- | The NameCache makes sure that there is just one Unique assigned for
-- each original name; i.e. (module-name, occ-name) pair and provides
-- something of a lookup mechanism for those names.
data NameCache
 = NameCache {  nsUniqs :: !UniqSupply,
                -- ^ Supply of uniques
                nsNames :: !OrigNameCache
                -- ^ Ensures that one original name gets one unique
   }
-- | Build the initial 'NameCache' from a unique supply and the list of
-- pre-known (known-key) names to seed the original-name cache with.
initNameCache :: UniqSupply -> [Name] -> NameCache
initNameCache us names
  = NameCache { nsUniqs = us,
                nsNames = initOrigNames names }
-- | Seed the original-name cache with the given known-key names.
-- Uses a strict left fold ('foldl'') so the intermediate environments
-- are forced as we go instead of accumulating a chain of thunks.
initOrigNames :: [Name] -> OrigNameCache
initOrigNames names = foldl' extendOrigNameCache emptyModuleEnv names
| olsner/ghc | compiler/basicTypes/NameCache.hs | bsd-3-clause | 4,102 | 0 | 12 | 865 | 416 | 224 | 192 | 46 | 2 |
module T () where
import Language.Haskell.Liquid.Prelude
{-@ assert zipW :: (a -> b -> c) -> xs : [a] -> ys:{v:[b] | len(v) = len(xs)} -> {v : [c] | len(v) = len(xs)} @-}
-- | Like 'zipWith', but aborts (via 'liquidError') when the lists have
-- different lengths instead of silently truncating.
zipW :: (a -> b -> c) -> [a] -> [b] -> [c]
zipW g (x:xs) (y:ys) = g x y : zipW g xs ys
zipW _ []     []     = []
zipW _ []     (_:_)  = liquidError "zipWith1"
zipW _ (_:_)  []     = liquidError "zipWith1"
{-@ assert foo :: (a -> b -> c) -> xs : [a] -> ys:{v:[b] | len(v) = len(xs)} -> {v : [c] | len(v) = len(xs)} @-}
-- Alias for 'zipW', re-exporting it under the refinement type above.
foo = zipW
| ssaavedra/liquidhaskell | tests/pos/zipW.hs | bsd-3-clause | 496 | 0 | 8 | 125 | 166 | 90 | 76 | 8 | 1 |
module NegativeIn3 where
-- NOTE(review): with @x :: Maybe a@, the body @x * 2@ only typechecks
-- given a @Num (Maybe a)@ instance, which is not visible here.  This
-- appears to be a deliberately odd refactoring-test fixture -- confirm
-- before "fixing".
f :: Maybe a -> Int -> Int
f x y = x * 2
| kmate/HaRe | old/testing/introCase/NegativeIn3_TokOut.hs | bsd-3-clause | 69 | 0 | 6 | 21 | 34 | 18 | 16 | 3 | 1 |
import Test.Cabal.Prelude
-- Test missing internal build-tool-depends does indeed fail
-- Configuring with a missing internal build-tool dependency must fail,
-- and the failure output must name the missing executable.
main = setupAndCabalTest $ do
    r <- fails (setup' "configure" [])
    assertOutputContains "missing internal executable" r
| mydaum/cabal | cabal-testsuite/PackageTests/BuildToolDependsInternalMissing/setup.test.hs | bsd-3-clause | 214 | 0 | 12 | 36 | 40 | 20 | 20 | 4 | 1 |
module Main where
import qualified Data.Set as Set
import Control.Monad
import Data.List
---
---
---
-- | The four axis-aligned step directions on the grid.
data Direction = DirUp | DirLeft | DirRight | DirDown
    deriving (Eq,Ord,Show,Read)

-- | Every direction, in a fixed enumeration order.
directions = [DirUp, DirLeft, DirRight, DirDown]

-- | (row, column) offset of one step in the given direction.
coordOffset dir = case dir of
    DirUp    -> (-1, 0)
    DirLeft  -> (0, -1)
    DirRight -> (0, 1)
    DirDown  -> (1, 0)

-- | Step a (row, column) coordinate one cell in the given direction.
move (row, col) dir = (row + dr, col + dc)
  where (dr, dc) = coordOffset dir
-- | Orient a pair so the smaller component (by 'compare') comes first;
-- equal components keep their original order.
sortPair pair@(x, y) =
    case compare x y of
        GT -> (y, x)
        _  -> pair

-- | Apply one function to both components of a homogeneous pair.
mapPair12 g (a, b) = (g a, g b)
-- | Memoise @f@ over the naturals: build the lazy list @map f [0..]@
-- once (shared across all calls to the returned function) and answer
-- queries by indexing into it.  Lookup is O(i) via (!!) and only valid
-- for non-negative arguments.
cachedUsingList f = f'
    where
        list = map f [0..]
        f' i = list !! i
-- | Collapse runs of equal adjacent elements; on a sorted list this
-- removes all duplicates in a single pass.
nubSorted [] = []
nubSorted (first : rest) = go first rest
  where
    go current [] = [current]
    go current (y:ys)
      | current == y = go current ys
      | otherwise    = current : go y ys
---
---
---
-- NOTE(review): 'size' is not referenced anywhere in this chunk; it may
-- be used elsewhere or be dead -- confirm before removing.
size = 21
-- Polyomino areas are enumerated explicitly up to this cell count.
largestExplicitlyEnumeratedArea = 7
type Cell = (Int,Int)
type Edge = (Cell,Cell)
-- | Canonical (sorted) representation of an undirected edge.
mkEdge cell1 cell2 = sortPair (cell1,cell2)
-- | Sorted, de-duplicated list of cells adjacent to (but not in) an area.
cellsAround area = nubSorted $ sort $
    do
        cell <- area
        dir <- directions
        let cell2 = move cell dir
        guard $ cell2 `notElem` area
        return $ cell2
-- | Grow each area by one neighbouring cell, in every possible way,
-- de-duplicating the (sorted) results.
increaseAreas areas = nubSorted $ sort $
    do
        area <- areas
        cell2 <- cellsAround area
        return $ sort $ cell2 : area
getAreas :: Int -> [[Cell]]
-- All connected areas of n cells, built by growing the (n-1)-cell areas;
-- the single-cell base case is the origin.
getAreasRaw 1 = [[(0,0)]]
getAreasRaw n = areas
    where
        areas = increaseAreas $ getAreas $ n - 1
-- Memoised front-end so each size is enumerated at most once.
getAreas = cachedUsingList getAreasRaw
-- | Partition an area's edges into (internal, boundary): an edge is
-- internal iff both endpoints lie in the area.
getEdges area = mapPair12 (map snd) $ partition fst $ nubSorted $ sort $
    do
        cell <- area
        dir <- directions
        let cell2 = move cell dir
        let isInternal = cell2 `elem` area
        return (isInternal,mkEdge cell cell2)
-- An area annotated with its size, its (cells, surrounding cells) sets
-- and its (internal, boundary) edge sets.
type SizedArea = (Int,((Set.Set Cell,Set.Set Cell),(Set.Set Edge,Set.Set Edge)))
-- | Each n-cell area paired with its surrounding cells and edge info,
-- everything converted to sets.
getExtendedAreas n =
    do
        area <- getAreas n
        let areaAround = cellsAround area
        let edgeInfo = getEdges area
        return ((Set.fromList area,Set.fromList areaAround),mapPair12 Set.fromList edgeInfo)
-- | All extended areas of sizes 1..n, tagged with their size.
getSizedAreasThrough :: Int -> [SizedArea]
getSizedAreasThrough n =
    do
        n' <- [1 .. n]
        extendedArea <- getExtendedAreas n'
        return $ (n',extendedArea)
sizeForSizedArea (asize,_) = asize
allSizedAreas = getSizedAreasThrough largestExplicitlyEnumeratedArea
-- Performance test driver: force and print the whole enumeration.
main = print $ allSizedAreas
| ezyang/ghc | testsuite/tests/rts/T2047.hs | bsd-3-clause | 2,365 | 0 | 11 | 645 | 953 | 503 | 450 | 72 | 3 |
{-# OPTIONS_GHC -fno-warn-redundant-constraints #-}
{-# LANGUAGE PatternSynonyms, GADTs, ViewPatterns #-}
-- Pattern synonyms
module ShouldCompile where
-- GADT whose constructor carries a provided (Eq b) constraint.
data T a where
    MkT :: (Eq b) => a -> b -> T a
-- View used by the pattern synonym; its (Show a) constraint becomes a
-- *required* constraint of P.  Never evaluated at compile time.
f :: (Show a) => a -> Bool
f = undefined
-- NOTE(review): compile-only fixture exercising a record pattern synonym
-- with a view pattern over a GADT -- behaviour is irrelevant here.
pattern P{x} <- MkT (f -> True) x
| ezyang/ghc | testsuite/tests/patsyn/should_compile/records-req.hs | bsd-3-clause | 281 | 0 | 8 | 57 | 86 | 49 | 37 | 8 | 1 |
module ShadowVarianceChebyshev0 where
-- | One-tailed Chebyshev bound from first/second moments @(d, ds)@:
-- returns 1 when @t <= d@, otherwise at least the variance-based upper
-- bound @variance / (variance + (t - d)^2)@ (the max of both terms).
chebyshev :: (Float, Float) -> Float -> Float
chebyshev (mean, meanSq) t = max base upperBound
  where
    base       = if t <= mean then 1.0 else 0.0
    variance   = meanSq - (mean * mean)
    delta      = t - mean
    upperBound = variance / (variance + (delta * delta))

factor :: (Float, Float) -> Float -> Float
factor = chebyshev
| io7m/r2 | com.io7m.r2.documentation/src/main/resources/com/io7m/r2/documentation/haskell/ShadowVarianceChebyshev0.hs | isc | 336 | 0 | 13 | 103 | 139 | 78 | 61 | 10 | 2 |
module Shaker.Reflexivite (
RunnableFunction(..)
-- * Collect module information functions
,runFunction
)
where
import Control.Exception as C
import Control.Monad.Reader
import Data.Maybe
import DynFlags
import GHC
import GHC.Paths
import Shaker.Action.Compile
import Shaker.GhcInterface
import Shaker.Type
import Unsafe.Coerce
-- | Everything needed to compile and invoke a function dynamically:
-- the modules to bring into scope, the libraries to expose, and the
-- function's name.
data RunnableFunction = RunnableFunction {
  runnableFunctionModule :: [String]       -- ^ modules to put in context
  ,runnableLibrairies :: [String]          -- ^ package names to expose
  ,runnableFunctionFunction :: String -- The function name. Should have IO() as signature
 }
 deriving Show
-- | Compile, load and run the given function
runFunction :: CompileInput -> RunnableFunction -> Shaker IO()
runFunction cpIn (RunnableFunction importModuleList listLibs fun) = do
  -- Resolve the requested libraries to installed package ids; silently
  -- drops any library that cannot be found (catMaybes).
  listInstalledPkgId <- fmap catMaybes (mapM searchInstalledPackageId listLibs)
  dynFun <- lift $ runGhc (Just libdir) $ do
        dflags <- getSessionDynFlags
        -- Hide all packages, then expose only the resolved ones.
        _ <- setSessionDynFlags (addLibraryToDynFlags listInstalledPkgId (dopt_set dflags Opt_HideAllPackages))
        _ <- ghcCompile cpIn
        configureContext importModuleList
        value <- compileExpr fun
        -- NOTE(review): unsafeCoerce to an unconstrained type variable;
        -- safe only if 'fun' really has the expected IO () signature.
        do let value' = unsafeCoerce value :: a
           return value'
  _ <- lift $ handleActionInterrupt dynFun
  return ()
  where
    genTuple :: ModSummary -> (Module, Maybe (ImportDecl RdrName))
    genTuple modSummary = (ms_mod modSummary, Nothing)
    -- No explicit imports: put the whole compiled module graph in scope.
    configureContext [] = do
      modGraph <- getModuleGraph
      setContext [] (map genTuple modGraph)
    -- Otherwise bring exactly the named modules into scope.
    configureContext imports = do
      mods <- mapM (\a -> findModule (mkModuleName a) Nothing) imports
      setContext [] $ map (\m -> (m, Nothing) ) mods
-- | Run an IO action, logging and deliberately swallowing any exception
-- it raises (catches 'SomeException', so the caller never sees a throw).
handleActionInterrupt :: IO() -> IO()
handleActionInterrupt = C.handle catchAll
  where
    catchAll :: C.SomeException -> IO ()
    catchAll err = do
        putStrLn ("Shaker caught " ++ show err)
        return ()
| bonnefoa/Shaker | src/Shaker/Reflexivite.hs | isc | 1,882 | 0 | 16 | 403 | 521 | 265 | 256 | 43 | 2 |
-- | Event module. Wait until OpenCL commands are done.
module Data.OpenCL.Event
( CLEvent()
, waitEvents )
where
import Control.Concurrent.MVar
import Control.Monad.IO.Class
import Control.Monad.Catch
import Control.Monad.Primitive
import Data.Foldable
import Data.OpenCL.Exception
import Data.OpenCL.Handle
import Data.OpenCL.Raw
import Data.Traversable
import Foreign.Marshal.Array
-- | Waits until all events listed are done.
waitEvents :: MonadIO m => [CLEvent] -> m ()
waitEvents wait_events = liftIO $ do
  -- Pull the raw event handles out of their MVars.
  evs <- for wait_events $ readMVar . handleEvent
  -- 'touch' keeps each wrapper reachable until the blocking wait returns,
  -- so finalizers cannot release the underlying handles early.
  flip finally (for_ wait_events $ touch . handleEvent) $
    withArray evs $ \evs_array ->
      clErrorify $ wait_for_events (fromIntegral $ length wait_events)
                                   evs_array
| Noeda/opencl-bindings | src/Data/OpenCL/Event.hs | isc | 780 | 0 | 14 | 151 | 190 | 106 | 84 | 22 | 1 |
{-# LANGUAGE CPP #-}
module Data.Streaming.Network.Internal
( ServerSettings (..)
, ClientSettings (..)
, HostPreference (..)
, Message (..)
, AppData (..)
#if !WINDOWS
, ServerSettingsUnix (..)
, ClientSettingsUnix (..)
, AppDataUnix (..)
#endif
) where
import Data.String (IsString (..))
import Data.ByteString (ByteString)
import Network.Socket (Socket, SockAddr, Family)
-- | Settings for a TCP server. It takes a port to listen on, and an optional
-- hostname to bind to.
data ServerSettings = ServerSettings
    { serverPort :: !Int
    , serverHost :: !HostPreference
    , serverSocket :: !(Maybe Socket) -- ^ listening socket
    , serverAfterBind :: !(Socket -> IO ()) -- ^ run on the bound socket before serving
    , serverNeedLocalAddr :: !Bool
    , serverReadBufferSize :: !Int -- ^ bytes per read
    }
-- | Settings for a TCP client, specifying how to connect to the server.
data ClientSettings = ClientSettings
    { clientPort :: !Int
    , clientHost :: !ByteString
    , clientAddrFamily :: !Family
    , clientReadBufferSize :: !Int -- ^ bytes per read
    }
-- | Which host to bind.
--
-- Note: The @IsString@ instance recognizes the following special values:
--
-- * @*@ means @HostAny@ - "any IPv4 or IPv6 hostname"
--
-- * @*4@ means @HostIPv4@ - "any IPv4 or IPv6 hostname, IPv4 preferred"
--
-- * @!4@ means @HostIPv4Only@ - "any IPv4 hostname"
--
-- * @*6@ means @HostIPv6@@ - "any IPv4 or IPv6 hostname, IPv6 preferred"
--
-- * @!6@ means @HostIPv6Only@ - "any IPv6 hostname"
--
-- Note that the permissive @*@ values allow binding to an IPv4 or an
-- IPv6 hostname, which means you might be able to successfully bind
-- to a port more times than you expect (eg once on the IPv4 localhost
-- 127.0.0.1 and again on the IPv6 localhost 0:0:0:0:0:0:0:1).
--
-- Any other value is treated as a hostname. As an example, to bind to the
-- IPv4 local host only, use \"127.0.0.1\".
-- | Which host/interface to bind; see the special string spellings
-- recognized by the 'IsString' instance below.
data HostPreference
    = HostAny
    | HostIPv4
    | HostIPv4Only
    | HostIPv6
    | HostIPv6Only
    | Host String
    deriving (Eq, Ord, Show, Read)

-- | Recognize the special values @*@, @*4@, @!4@, @*6@ and @!6@;
-- anything else is treated as an explicit hostname.
instance IsString HostPreference where
    fromString s = case s of
        "*"  -> HostAny
        "*4" -> HostIPv4
        "!4" -> HostIPv4Only
        "*6" -> HostIPv6
        "!6" -> HostIPv6Only
        _    -> Host s
#if !WINDOWS
-- | Settings for a Unix domain sockets server.
data ServerSettingsUnix = ServerSettingsUnix
    { serverPath :: !FilePath -- ^ path of the socket file to bind
    , serverAfterBindUnix :: !(Socket -> IO ()) -- ^ run on the bound socket before serving
    , serverReadBufferSizeUnix :: !Int
    }
-- | Settings for a Unix domain sockets client.
data ClientSettingsUnix = ClientSettingsUnix
    { clientPath :: !FilePath -- ^ path of the socket file to connect to
    , clientReadBufferSizeUnix :: !Int
    }
-- | The data passed to a Unix domain sockets @Application@.
data AppDataUnix = AppDataUnix
    { appReadUnix :: !(IO ByteString)
    , appWriteUnix :: !(ByteString -> IO ())
    }
#endif
-- | Representation of a single UDP message
data Message = Message { msgData :: {-# UNPACK #-} !ByteString -- ^ message payload
                       , msgSender :: !SockAddr -- ^ address the message was received from
                       }
-- | The data passed to an @Application@.
data AppData = AppData
    { appRead' :: !(IO ByteString) -- ^ read a chunk from the connection
    , appWrite' :: !(ByteString -> IO ()) -- ^ send a chunk over the connection
    , appSockAddr' :: !SockAddr -- ^ address of the remote peer
    , appLocalAddr' :: !(Maybe SockAddr) -- ^ local address, when available
    , appCloseConnection' :: !(IO ()) -- ^ close the underlying connection
    , appRawSocket' :: Maybe Socket -- ^ underlying socket, if exposed
    }
| fpco/streaming-commons | Data/Streaming/Network/Internal.hs | mit | 3,297 | 0 | 13 | 742 | 564 | 338 | 226 | 105 | 0 |
{-# LANGUAGE ScopedTypeVariables, BangPatterns #-}
{-# LANGUAGE ExplicitNamespaces, Arrows #-}
module Document.Document where
--
-- Modules
--
import Document.Machine as Mch
import Document.Pipeline
import Document.Phase as P
import Document.Phase.Types
import Document.Phase.Structures as Mch
import Document.Phase.Declarations as Mch
import Document.Phase.Expressions as Mch
import Document.Phase.Proofs as Mch
import Document.Visitor hiding (hoistEither)
import Latex.Parser
import Logic.Expr
import Logic.Proof
import UnitB.UnitB
--
-- Libraries
--
import Control.Arrow hiding (left,app) -- (Arrow,arr,(>>>))
import Control.Category
import Control.Lens
import Control.Lens.Misc
import Control.Monad
import qualified Control.Monad.Reader as R
import Control.Monad.Trans
import Control.Monad.Trans.Either
import qualified Control.Monad.Writer as W
import Data.Either.Combinators
import Data.List.Ordered (sortOn)
import qualified Data.List.NonEmpty as NE
import Data.Map as M hiding ( map, (\\) )
import qualified Data.Map as M
import Data.Semigroup
import Prelude hiding ((.),id)
import Utilities.Syntactic as Syn
read_document :: LatexDoc -> Either [Error] System
read_document = parseWith system
{-# INLINABLE parseWith #-}
parseWith :: Pipeline MM () a -> LatexDoc -> Either [Error] a
parseWith parser xs = mapBoth (sortOn line_info . shrink_error_list) id $ do
let li = line_info xs
(ms,cs) <- get_components xs li
runPipeline' ms cs () parser
{-# INLINABLE system #-}
system :: Pipeline MM () System
system = run_phase0_blocks
>>> run_phase1_types
>>> run_phase2_vars
>>> run_phase3_exprs
>>> run_phase4_proofs
>>> wrap_machine
wrap_machine :: Pipeline MM SystemP4 System
wrap_machine = proc m4 -> do
sys <- liftP id -< m4 & mchMap (M.traverseWithKey make_machine)
returnA -< create' $ do
machines .= sys^.mchMap
ref_struct .= (Nothing <$ sys^.mchMap)
ref_struct %= M.union (sys^.refineStruct.to (M.map Just .edges))
-- | Parse a full document into a 'System'; synonym for 'read_document'.
all_machines :: LatexDoc -> Either [Error] System
all_machines = read_document
list_machines :: FilePath
-> EitherT [Error] IO [Machine]
list_machines fn = do
doc <- liftIO $ readFile fn
xs <- hoistEither $ latex_structure fn doc
ms <- hoistEither $ all_machines xs
return $ map snd $ toAscList $ ms!.machines
list_proof_obligations :: FilePath
-> EitherT [Error] IO [(Machine, Map Label Sequent)]
list_proof_obligations fn = do
xs <- list_machines fn
return [ (m,proof_obligation m) | m <- xs ]
-- | Compute the proof obligations of every machine in the given file.
--
-- Same as 'list_proof_obligations' but with errors reified in the
-- result instead of carried by 'EitherT'.  (The previous version
-- wrapped the single expression in a redundant @do@ block.)
list_file_obligations :: FilePath
                      -> IO (Either [Error] [(Machine, Map Label Sequent)])
list_file_obligations fn =
        runEitherT $ list_proof_obligations fn
parse_system :: FilePath -> IO (Either [Error] System)
parse_system fn = parse_system' $ pure fn
parse_system' :: NonEmpty FilePath -> IO (Either [Error] System)
parse_system' fs = runEitherT $ do
docs <- liftIO $ mapM readFile fs
xs <- hoistEither $ flip traverseValidation (NE.zip fs docs) $
\(fn,doc) -> do
latex_structure fn doc
hoistEither $ all_machines $ sconcat xs
-- | Parse the machines contained in the given file.
--
-- Previously this duplicated the read/parse pipeline of
-- 'list_machines' line for line; it now simply delegates to it.
parse_machine :: FilePath -> IO (Either [Error] [Machine])
parse_machine fn = runEitherT $ list_machines fn
-- | Collect the \"machine\" and \"context\" environments of a document,
-- keyed by the name found in each environment's first label.
-- Several environments may share a name; their bodies are collected
-- into the same map entry.
get_components :: LatexDoc -> LineInfo
               -> Either [Error] (Map Name [LatexDoc],Map String [LatexDoc])
get_components xs li =
        liftM g
            $ R.runReader (runEitherT $ W.execWriterT
                (mapM_ f $ contents' xs)) li
    where
        -- run a sub-computation with its own line information
        with_li li cmd = R.local (const li) cmd
        -- first label of the environment body, used as its name
        get_name li xs = with_li li $ liftM fst $ lift $ get_1_lbl xs
        -- walk the document, emitting (machines, contexts) via the Writer
        f x@(EnvNode (Env _ tag li0 xs _li1))
            | tag == "machine" = do
                    n <- get_name li0 xs
                    -- machine names additionally have to be valid 'Name's
                    n' <- lift $ hoistEither $ Syn.with_li li $ isName n
                    W.tell ([(n',[xs])],[])
            | tag == "context" = do
                    n <- get_name li0 xs
                    W.tell ([],[(n,[xs])])
            | otherwise = map_docM_ f x
        f x = map_docM_ f x
        -- group the accumulated association lists by name
        g (x,y) = (M.fromListWith (++) x, M.fromListWith (++) y)
syntaxSummary :: [String]
syntaxSummary = machineSyntax system
| literate-unitb/literate-unitb | src/Document/Document.hs | mit | 4,699 | 2 | 18 | 1,356 | 1,450 | 763 | 687 | -1 | -1 |
{-# LANGUAGE BangPatterns
, FunctionalDependencies
, CPP
, RecordWildCards
, TypeFamilies
, TypeOperators #-}
module BigPixel.PixelSize (
PixelSize (..)
) where
import Vision.Image.Class
import Vision.Image.Grey.Type (GreyPixel (..))
import Vision.Image.HSV.Type (HSVPixel (..))
import Vision.Image.RGBA.Type (RGBAPixel (..))
import Vision.Image.RGB.Type (RGBPixel (..))
import Data.Int
import Data.Word
import BigPixel.BigRGBPixel
import BigPixel.BigRGBAPixel
import BigPixel.BigGreyPixel
import BigPixel.BigHSVPixel
-- | Conversion between a compact pixel representation @a@ and a wider
-- representation @b@ that can hold intermediate arithmetic results
-- without overflowing (see the instances below, which widen to 'Int16').
class PixelSize a b | a -> b where
  expand :: a -> b -- ^ widen every component of the pixel
  shrink :: b -> a -- ^ clamp each component to its valid range and narrow back
-- RGBA, RGB, and Gray values range from 0 to 255
-- H values are 0 - 179 (degrees), SV are 0 - 255
-- Since pixels are read and written in Word8, all clamps will assume those values, regardless of anything else. This means
-- in practice clamping should be the last operation performed.
instance PixelSize RGBPixel BigRGBPixel where
expand !(RGBPixel r g b) = BigRGBPixel (fromIntegral r :: Int16) (fromIntegral g :: Int16) (fromIntegral b :: Int16)
shrink !(BigRGBPixel r g b) = RGBPixel (word8 . clamp 0 255 $ r) (word8 . clamp 0 255 $ g) (word8 . clamp 0 255 $ b)
instance PixelSize RGBAPixel BigRGBAPixel where
expand !(RGBAPixel r g b a) = BigRGBAPixel (fromIntegral r :: Int16) (fromIntegral g :: Int16) (fromIntegral b :: Int16) (fromIntegral a :: Int16)
shrink !(BigRGBAPixel r g b a) = RGBAPixel (word8 . clamp 0 255 $ r) (word8 . clamp 0 255 $ g) (word8 . clamp 0 255 $ b) (word8 . clamp 0 255 $ a)
instance PixelSize GreyPixel BigGreyPixel where
expand !(GreyPixel g) = BigGreyPixel (fromIntegral g :: Int16)
shrink !(BigGreyPixel g) = GreyPixel (word8 . clamp 0 255 $ g)
instance PixelSize HSVPixel BigHSVPixel where
expand !(HSVPixel r g b) = BigHSVPixel (fromIntegral r :: Int16) (fromIntegral g :: Int16) (fromIntegral b :: Int16)
shrink !(BigHSVPixel r g b) = HSVPixel (word8 . clamp 0 179 $ r) (word8 . clamp 0 255 $ g) (word8 . clamp 0 255 $ b)
-- functions
-- | Restrict the third argument to the closed interval given by the
-- first two arguments (lower bound, then upper bound).
clamp :: (Ord n) => n -> n -> n -> n
clamp lo hi n
  | n > hi    = hi
  | n < lo    = lo
  | otherwise = n
-- | Narrow an integral value to 'Word8'.  Out-of-range inputs wrap
-- modulo 256, so callers are expected to 'clamp' first.
word8 :: Integral a => a -> Word8
word8 = fromIntegral
| eklinkhammer/haskell-vision | BigPixel/PixelSize.hs | mit | 2,255 | 0 | 10 | 492 | 762 | 401 | 361 | -1 | -1 |
module EdExecute (execute) where
import System.IO (putStrLn)
import System.Exit (exitSuccess)
import Ed (ed)
import EdOption (EdOption(..))
import EdError (EdError(NO_INPUT_FILE), edError)
-- | Interpret the parsed command-line options and run the editor.
--
-- Options are processed left to right; @-p@ updates the prompt string
-- that is passed along to a later 'OFile'.
--
-- Bug fix: the inner worker @exe@ only matched a non-empty list, so an
-- option list ending in 'OPrompt' (e.g. just @-p foo@) crashed with a
-- pattern-match failure.  Running out of options without a file is now
-- reported as 'NO_INPUT_FILE', like an empty option list.
execute :: [EdOption] -> IO ()
execute [] = edError NO_INPUT_FILE ""
execute es = exe es ""
  where exe [] _ = edError NO_INPUT_FILE "" -- options exhausted without a file
        exe (e:es') p = case e of
          OError t s -> edError t s
          OHelp -> edHelp
          OVersion -> edVersion
          OScript -> putStrLn "OScript running"
                     >> exitSuccess
          OPrompt p' -> exe es' p' -- remember the prompt, keep scanning
          OFile f -> ed f p
        edHelp = (putStrLn $ "Usage: ed [options] file\n"
                  ++ "Options:\n"
                  ++ "\t-h\tDisplay this information\n"
                  ++ "\t-v\tDisplay version information\n"
                  ++ "\t-p [string]\tSpecify a command prompt\n"
                  ++ "\t-s\tSuppress diagnostics")
                 >> exitSuccess
        edVersion = (putStrLn $ "hed\n"
                     ++ " -- line text editor implemented in Haskell\n"
                     ++ "version 0.0.1")
                    >> exitSuccess
| demmys/hed | EdExecute.hs | mit | 1,236 | 0 | 12 | 531 | 261 | 138 | 123 | 28 | 6 |
module TestSuites.ParserHTMLSpec(spec, matches) where
--3rd party
import Test.Hspec.Contrib.HUnit(fromHUnitTest)
import Test.Hspec (hspec)
import Test.HUnit
--own
import HsPredictor.ParserHTML
-- Run the spec directly when this module is the entry point.
main = hspec spec
-- Adapt the HUnit test groups below to an Hspec tree.
spec = fromHUnitTest $ TestList [
  TestLabel ">>readMatches" test_readMatches,
  TestLabel ">>convertHtml" test_convertHtml,
  TestLabel ">>fillHoles" test_fillHoles]
-- readMatches test data
matches = [
"15.08.2014,Swansea - Newcastle Utd, 1.92, 3.52, 4.06",
"30.08.2014, Everton - Chelsea, 3:6, 4.05, 3.42, 1.96",
"31.08.2014, Everton - Chelsea, 3:6, 4.05",
"32.08.2014,Everton - Chelsea, 3:6",
"33.08.2014, Everton - Chelsea",
"34.08.2014, Everton - Chelsea,"]
shouldBe = [
"2014.08.15,Swansea,Newcastle Utd,-1,-1,1.92,3.52,4.06 \n",
"2014.08.30,Everton,Chelsea,3,6,4.05,3.42,1.96 \n",
"2014.08.31,Everton,Chelsea,3,6,-1.0,-1.0,-1.0 \n",
"2014.08.32,Everton,Chelsea,3,6,-1.0,-1.0,-1.0 \n",
"2014.08.33,Everton,Chelsea,-1,-1,-1.0,-1.0,-1.0 \n",
"2014.08.34,Everton,Chelsea,-1,-1,-1.0,-1.0,-1.0 \n"]
-- convertHtml test data
resultsPath = "tests/tmp/results.html"
fixturesPath = "tests/tmp/fixtures.html"
fixturesTest = [
"15.08.2015 13:45, Southampton - Everton, 1.94, 3.48, 4.10",
"15.08.2015 16:00, Sunderland - Norwich, 2.64, 3.20, 2.81"]
fixturesMatchesTest = [
"2015.08.15,Southampton,Everton,-1,-1,1.94,3.48,4.10 \n",
"2015.08.15,Sunderland,Norwich,-1,-1,2.64,3.20,2.81 \n"]
resultsTest = [
"14.08.2015, Aston Villa - Manchester United, 0:1, 5.17, 3.63, 1.73",
"10.08.2015, West Brom - Manchester City, 0:3, 5.80, 3.99, 1.61",
"09.08.2015, Arsenal - West Ham, 0:2, 1.23, 6.32, 13.06",
"09.08.2015, Newcastle Utd - Southampton, 2:2, 2.94, 3.22, 2.52"]
resultsMatchesTest = [
"2015.08.14,Aston Villa,Manchester United,0,1,5.17,3.63,1.73 \n",
"2015.08.10,West Brom,Manchester City,0,3,5.80,3.99,1.61 \n",
"2015.08.09,Arsenal,West Ham,0,2,1.23,6.32,13.06 \n",
"2015.08.09,Newcastle Utd,Southampton,2,2,2.94,3.22,2.52 \n"]
-- fillHoles testData
holesTestList = [
"14.08.2015, Aston Villa - Manchester United, 0:1, 5.17, 3.63, 1.73",
"West Brom - Manchester City, 0:3, 5.80, 3.99, 1.61",
"09.08.2015, Arsenal - West Ham, 0:2, 1.23, 6.32, 13.06",
"Newcastle Utd - Southampton, 2:2, 2.94, 3.22, 2.52"]
holesTestList2 = [
"14.08.2015, Aston Villa - Manchester United, 0:1, 5.17, 3.63, 1.73",
"14.08.2015,West Brom - Manchester City, 0:3, 5.80, 3.99, 1.61",
"09.08.2015, Arsenal - West Ham, 0:2, 1.23, 6.32, 13.06",
"09.08.2015,Newcastle Utd - Southampton, 2:2, 2.94, 3.22, 2.52"]
-- tests
-- Every raw input line must normalise to the corresponding canonical
-- match row (missing score/odds fields are filled with -1 defaults).
test_readMatches = TestCase $
  sequence_ [ readMatches [raw] @?= [expected]
            | (raw, expected) <- zip matches shouldBe ]
test_convertHtml = TestCase $ do
fixtures <- convertHtmlFile fixturesPath fromFixturesHTML
results <- convertHtmlFile resultsPath fromResultsHTML
let f1 = readMatches [fixtures !! 0]
let r1 = readMatches [results !! 0]
fixtures @?= fixturesTest
results @?= resultsTest
[resultsMatchesTest !! 0] @?= r1
[fixturesMatchesTest !! 0] @?= f1
-- Rows missing a leading date must inherit the date of the closest
-- preceding dated row.
test_fillHoles = TestCase $ do
  let h = fillHoles holesTestList ""
  -- HUnit convention (and the rest of this module): actual @?= expected.
  -- The operands were previously swapped, which produced a misleading
  -- "expected/but got" message on failure; pass/fail is unchanged.
  h @?= holesTestList2
| Taketrung/HsPredictor | tests/TestSuites/ParserHTMLSpec.hs | mit | 3,480 | 0 | 13 | 576 | 598 | 325 | 273 | 77 | 1 |
module ProgrammingInHaskell2.Chap02Spec (spec) where
import SpecHelper
import ProgrammingInHaskell2.Chap02
spec :: Spec
spec = do
  -- Section 2.3: standard list selection functions.
  describe "2.3" $ do
    it "head [1,2,3,4,5]" $ do
      head [1,2,3,4,5] `shouldBe` 1
    it "tail [1,2,3,4,5]" $ do
      tail [1,2,3,4,5] `shouldBe` [2,3,4,5]
  -- Section 2.5: functions defined in ProgrammingInHaskell2.Chap02.
  describe "2.5" $ do
    it "factorial 10" $ do
      factorial 10 `shouldBe` 3628800
    it "average [1,2,3,4,5]" $ do
      average [1,2,3,4,5] `shouldBe` 3
| akimichi/haskell-labo | test/ProgrammingInHaskell2/Chap02Spec.hs | mit | 466 | 0 | 15 | 114 | 194 | 104 | 90 | 15 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module Ebay.Types.Shipping
( ShippingDetails (..)
, xpShippingDetails
, ShippingTypeCode (..)
, xpShippingTypeCode
, ShippingServiceOptions (..)
, xpShippingServiceOptions
) where
import Data.Text as T
import Data.XML.Pickle
import Data.XML.Types
import GHC.Read
import Text.ParserCombinators.ReadPrec
import Text.Read.Lex as L
import Ebay.Types
data ShippingDetails = ShippingDetails
{ shippingType :: Maybe ShippingTypeCode
, shippingServiceOptions :: [ShippingServiceOptions]
} deriving (Eq, Show)
xpShippingDetails :: PU [Node] ShippingDetails
xpShippingDetails =
xpWrap (\(a, b) -> ShippingDetails a b)
(\(ShippingDetails a b) -> (a, b)) $
xp2Tuple
(xpOption $ xpElemNodes "{urn:ebay:apis:eBLBaseComponents}ShippingType" $
xpContent xpShippingTypeCode)
(xpList $ xpElemNodes "{urn:ebay:apis:eBLBaseComponents}ShippingServiceOptions" $
xpShippingServiceOptions)
----
data ShippingTypeCode = STCalculated | STCalculatedDomesticFlatInternational | STCustomCode
| STFlat | STFlatDomesticCalculatedInternational | STFree | STFreight
| STFreightFlat | STNotSpecified
deriving (Eq)
instance Show ShippingTypeCode where
show STCalculated = "Calculated"
show STCalculatedDomesticFlatInternational = "CalculatedDomesticFlatInternational"
show STCustomCode = "CustomCode"
show STFlat = "Flat"
show STFlatDomesticCalculatedInternational = "FlatDomesticCalculatedInternational"
show STFree = "Free"
show STFreight = "Freight"
show STFreightFlat = "FreightFlat"
show STNotSpecified = "NotSpecified"
-- | Inverse of the custom 'Show' instance above: parse the eBay wire
-- strings back into 'ShippingTypeCode' values.  Unknown identifiers
-- fail the parse via 'pfail' rather than raising an error.
instance Read ShippingTypeCode where
  readPrec = parens $ do
      L.Ident s <- lexP
      case s of "Calculated" -> return STCalculated
                "CalculatedDomesticFlatInternational" -> return STCalculatedDomesticFlatInternational
                "CustomCode" -> return STCustomCode
                "Flat" -> return STFlat
                "FlatDomesticCalculatedInternational" -> return STFlatDomesticCalculatedInternational
                "Free" -> return STFree
                "Freight" -> return STFreight
                "FreightFlat" -> return STFreightFlat
                "NotSpecified" -> return STNotSpecified
                _ -> pfail
-- | Pickler that round-trips 'ShippingTypeCode' through the
-- 'Show'/'Read' wire representation.
xpShippingTypeCode :: PU Text ShippingTypeCode
xpShippingTypeCode = xpPrim
----
data ShippingServiceOptions = ShippingServiceOptions
{ servicePriority :: Maybe Int
, serviceShipper :: Maybe Text
, serviceCost :: Maybe Double
} deriving (Eq, Show)
xpShippingServiceOptions :: PU [Node] ShippingServiceOptions
xpShippingServiceOptions =
xpWrap (\(a, b, c) -> ShippingServiceOptions a b c)
(\(ShippingServiceOptions a b c) -> (a, b, c)) $
xp3Tuple
(xpOption $ xpElemNodes "{urn:ebay:apis:eBLBaseComponents}ShippingServicePriority" $
xpContent xpPrim)
(xpOption $ xpElemText "{urn:ebay:apis:eBLBaseComponents}ShippingService")
(xpOption $ xpElemNodes "{urn:ebay:apis:eBLBaseComponents}ShippingServiceCost" $
xpContent $ xpPrim)
| AndrewRademacher/hs-ebay-trading | src/Ebay/Types/Shipping.hs | mit | 3,801 | 0 | 12 | 1,341 | 660 | 359 | 301 | 72 | 1 |
module SFML.Graphics.SFShapeResizable
where
-- | Shapes whose number of points can be changed after construction.
class SFShapeResizable a where
    -- | Set the number of points of a resizable shape.
    setPointCount
        :: a -- ^ Shape to modify
        -> Int -- ^ New number of points of the shape
        -> IO ()
| SFML-haskell/SFML | src/SFML/Graphics/SFShapeResizable.hs | mit | 241 | 0 | 10 | 75 | 36 | 20 | 16 | 6 | 0 |
{-# LANGUAGE GeneralizedNewtypeDeriving, OverloadedStrings #-}
module EventEnv
( MsgEnv(..)
, MQTTEnv(..)
, EventEnv
, runEnv
, lift
, asIO
, respond
, respondNick
, publish
, ask
, asks
, runReaderT
) where
import Control.Monad.Trans.Reader
import Control.Monad.Trans.Class (lift)
import Control.Applicative
import Control.Concurrent (MVar)
import Control.Exception
import Data.ByteString (ByteString)
import Data.Maybe
import Data.Monoid
import Data.Text (Text)
import Data.Text.Encoding (encodeUtf8, decodeUtf8)
import qualified Network.MQTT as MQTT
import Network.SimpleIRC
import System.IO (hPutStrLn, stderr)
-- | Per-message environment that every event handler runs in.
data MsgEnv = MsgEnv
  { server :: MIrc -- ^ handle of the IRC server connection
  , msg :: IrcMessage -- ^ message that triggered the handler
  , statusUrl :: String -- ^ status URL handed to handlers (semantics defined by callers)
  , karmaFile :: MVar String -- ^ karma file path, shared through an 'MVar'
  , mqttEnv :: MQTTEnv -- ^ MQTT connection and topic configuration
  }
-- | MQTT connection plus the topics handlers can 'publish' to.
data MQTTEnv = MQTTEnv
  { connection :: MQTT.Config -- ^ client configuration used for publishing
  , pizzaTopic :: MQTT.Topic
  , alarmTopic :: MQTT.Topic
  , soundTopic :: MQTT.Topic
  }
type EventEnv a = ReaderT MsgEnv IO a
-- | Run an 'EventEnv' action with a freshly assembled 'MsgEnv'.
runEnv :: EventEnv a -> String -> MVar String -> MIrc -> IrcMessage -> MQTTEnv
       -> IO a
runEnv env url file s message mqtt =
    runReaderT env $ MsgEnv s message url file mqtt
-- | Capture the current environment and hand back the given action as
-- a plain 'IO' computation running in that environment.
asIO :: EventEnv a -> EventEnv (IO a)
asIO env = do
    e <- ask
    return $ runReaderT env e
-- | Send the given text back to wherever the triggering message came
-- from (channel or nick).  IO errors are logged to stderr instead of
-- propagating.
respond :: Text -> EventEnv ()
respond resp = do
    s <- asks server
    -- NOTE(review): 'fromJust' assumes every handled message carries an
    -- origin — TODO confirm this holds for all registered events.
    origin <- fromJust . mOrigin <$> asks msg
    lift $ sendMsg s origin (encodeUtf8 resp)
             `catch`
             \e -> hPutStrLn stderr $ "respond: " ++ show (e :: IOException)
-- | Like 'respond', but prefix the reply with the sender's nick when
-- answering in a channel (nick /= origin); private replies are sent
-- unprefixed.
respondNick :: Text -> EventEnv ()
respondNick resp = do
    m <- asks msg
    let nick = fromJust $ mNick m
        origin = fromJust $ mOrigin m
    respond $ if nick == origin
                then resp
                else decodeUtf8 nick <> ": " <> resp
-- | Publish a payload on the topic selected from the MQTT environment,
-- with QoS 'MQTT.NoConfirm'.
-- NOTE(review): the 'False' argument is presumably the retain flag —
-- confirm against 'Network.MQTT.publish'.
publish :: (MQTTEnv -> MQTT.Topic) -> ByteString -> EventEnv ()
publish getTopic payload = do
    mqtt <- asks (connection . mqttEnv)
    topic <- asks (getTopic . mqttEnv)
    lift $ MQTT.publish mqtt MQTT.NoConfirm False topic payload
| k00mi/bckspc-bot | src/EventEnv.hs | mit | 2,455 | 0 | 11 | 847 | 714 | 378 | 336 | 71 | 2 |
module PPL2.Pretty.MProg where
import PPL2.VM.Types
import PPL2.Pretty.Instr
-- ----------------------------------------
prettyMProg :: (Show v) =>
(OpCode -> Maybe Mnemonic) -> (MCode, [v]) -> String
prettyMProg toMn (mcode, mdata) =
unlines $
[ "code segment"
, "============"
, ""
] ++
prettyMCode toMn mcode ++
[ ""
, "data segment"
, "============"
, ""
, prettyData mdata
]
-- ----------------------------------------
prettyMCode :: (OpCode -> Maybe Mnemonic) -> MCode -> [String]
prettyMCode mns is =
zipWith pretty' [0..] is
where
pretty' pc' =
prettyInstr (indent pc') (prettyOp mns) (prettyJmp pc') prettyLab
prettyData :: Show v => [v] -> String
prettyData =
unlines . zipWith cell [0::Int ..]
where
cell i v = fmt' $ [show i, show v]
-- ----------------------------------------
| UweSchmidt/ppl2 | src/PPL2/Pretty/MProg.hs | mit | 858 | 0 | 9 | 185 | 274 | 151 | 123 | 26 | 1 |
{- If compiled without ForeignFunctionInterface (part of Haskell2010),
it complains not about FFI but about missing TemplateHaskell -}
foreign import ccall unsafe "getProgArgv"
getProgArgv :: Ptr CInt -> Ptr (Ptr CString) -> IO () | Pnom/haskell-ast-pretty | Test/examples/ForeignImport.hs | mit | 234 | 0 | 10 | 37 | 41 | 20 | 21 | 2 | 0 |
{-# LANGUAGE OverloadedStrings #-}
module Network.API.Mandrill.SendersSpec where
import Test.Hspec
import Test.Hspec.Expectations.Contrib
import Network.API.Mandrill.Types
import Network.API.Mandrill.Utils
import qualified Data.Text as Text
import qualified Network.API.Mandrill.Senders as Senders
import System.Environment
spec :: Spec
spec = do
test_list
test_domains
test_addDomain
test_checkDomain
test_info
test_timeSeries
test_list :: Spec
test_list =
describe "/senders/list.json" $
it "should list all senders" $ do
raw <- getEnv "MANDRILL_API_KEY"
resp <- runMandrill (ApiKey $ Text.pack raw) Senders.list
resp `shouldSatisfy` isRight
test_domains :: Spec
test_domains =
describe "/senders/domains.json" $
it "should list all domains" $ do
raw <- getEnv "MANDRILL_API_KEY"
resp <- runMandrill (ApiKey $ Text.pack raw) Senders.domains
resp `shouldSatisfy` isRight
test_addDomain :: Spec
test_addDomain =
describe "/senders/add-domain.json" $
it "should add a domain" $ do
raw <- getEnv "MANDRILL_API_KEY"
resp <- runMandrill (ApiKey $ Text.pack raw) $
Senders.addDomain "foo.org"
resp `shouldSatisfy` isRight
test_checkDomain :: Spec
test_checkDomain =
describe "/senders/check-domain.json" $
it "should ackknowledge a domain check process" $ do
raw <- getEnv "MANDRILL_API_KEY"
resp <- runMandrill (ApiKey $ Text.pack raw) $
Senders.checkDomain "foo.org"
resp `shouldSatisfy` isRight
test_info :: Spec
test_info =
describe "/senders/info.json" $
it "should show detailed sender info/stats" $ do
raw <- getEnv "MANDRILL_API_KEY"
resp <- runMandrill (ApiKey $ Text.pack raw) $
Senders.info "baz@foo.org"
resp `shouldSatisfy` isRight
test_timeSeries :: Spec
test_timeSeries =
describe "/senders/time-series.json" $
it "should return stats for a sender" $ do
raw <- getEnv "MANDRILL_API_KEY"
resp <- runMandrill (ApiKey $ Text.pack raw) $
Senders.timeSeries "baz@foo.org"
resp `shouldSatisfy` isRight
| krgn/hamdrill | test/Network/API/Mandrill/SendersSpec.hs | mit | 2,161 | 0 | 14 | 487 | 520 | 260 | 260 | 63 | 1 |
{-# htermination getChar :: IO Char #-}
| ComputationWithBoundedResources/ara-inference | doc/tpdb_trs/Haskell/full_haskell/Prelude_getChar_1.hs | mit | 40 | 0 | 2 | 7 | 3 | 2 | 1 | 1 | 0 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
module DeepShallow where
import Prelude hiding (mod,min,(<),(<=),(>),(>=),(==),(/=),(&&),(++))
import qualified Prelude
import Control.Applicative (Applicative (..))
import Control.Monad (ap,forM_)
import Control.Monad.State hiding (forM)
import Data.Array
import Data.Array.IO hiding (newArray)
import Data.Array.Base (getNumElements)
import Data.IORef
import Data.List (unfoldr)
import Data.Tree hiding (drawTree)
import Data.Tree.View
-- Preliminaries
(<) :: Ord a => FunC a -> FunC a -> FunC Bool
(<) = fromFunC $ Prim "(<)" (Prelude.<)
(<=) :: Ord a => FunC a -> FunC a -> FunC Bool
(<=) = fromFunC $ Prim "(<=)" (Prelude.<=)
(>) :: Ord a => FunC a -> FunC a -> FunC Bool
(>) = fromFunC $ Prim "(>)" (Prelude.>)
(>=) :: Ord a => FunC a -> FunC a -> FunC Bool
(>=) = fromFunC $ Prim "(>=)" (Prelude.>=)
(==) :: Eq a => FunC a -> FunC a -> FunC Bool
(==) = fromFunC $ Prim "(==)" (Prelude.==)
(/=) :: Eq a => FunC a -> FunC a -> FunC Bool
(/=) = fromFunC $ Prim "(/=)" (Prelude./=)
mod :: FunC Int -> FunC Int -> FunC Int
mod = fromFunC $ Prim "mod" Prelude.mod
min :: Ord a => FunC a -> FunC a -> FunC a
min = fromFunC $ Prim "min" Prelude.min
instance Syntactic () where
type Internal () = ()
toFunC () = Lit ()
fromFunC _ = ()
infixr 3 &&
(&&) :: FunC Bool -> FunC Bool -> FunC Bool
(&&) = fromFunC $ Prim "(&&)" (Prelude.&&)
class Type a
instance Type Int
instance Type Bool
instance Type Float
instance Type ()
-- Deep Embedding
data FunC a where
(:$) :: FunC (a -> b) -> FunC a -> FunC b -- Application
Lam :: (FunC a -> FunC b) -> FunC (a -> b) -- Abstraction
-- Symbols
Lit :: Show a => a -> FunC a
If :: FunC (Bool -> a -> a -> a)
While :: FunC ((s -> Bool) -> (s -> s) -> s -> s)
Pair :: FunC (a -> b -> (a,b))
Fst :: FunC ((a,b) -> a)
Snd :: FunC ((a,b) -> b)
Prim :: String -> a -> FunC a
-- Interpretation of variables
Value :: a -> FunC a -- Value of a variable
Variable :: String -> FunC a -- Name of a variable
-- Pure arrays
Arr :: FunC (Int -> (Int -> a) -> Array Int a)
ArrLen :: FunC (Array Int a -> Int)
ArrIx :: FunC (Array Int a -> Int -> a)
Sequential :: FunC (s -> (s -> (a,s)) -> Int -> Array Int a)
-- Monads
Return :: Monad m => FunC (a -> m a)
Bind :: Monad m => FunC (m a -> (a -> m b) -> m b)
-- Monadic arrays
NewArray_ :: FunC (Int -> IO (IOArray Int a))
GetArray :: FunC (IOArray Int a -> Int -> IO a)
PutArray :: FunC (IOArray Int a -> Int -> a -> IO ())
LengthArray :: FunC (IOArray Int a -> IO Int)
FreezeArray :: FunC (IOArray Int a -> IO (Array Int a))
ThawArray :: FunC (Array Int a -> IO (IOArray Int a))
-- Monadic loops
ForM :: Monad m => FunC (Int -> (Int -> m ()) -> m ())
WhileM :: Monad m => FunC (m Bool -> m () -> m ())
-- Monadic references
NewRef :: FunC (a -> IO (IORef a))
ReadRef :: FunC (IORef a -> IO a)
WriteRef :: FunC (IORef a -> a -> IO ())
pair :: FunC a -> FunC b -> FunC (a,b)
pair a b = Pair :$ a :$ b
-- Primitive Functions and Literals
instance (Num a, Show a) => Num (FunC a) where
fromInteger = Lit . fromInteger
a + b = Prim "(+)" (+) :$ a :$ b
a - b = Prim "(-)" (-) :$ a :$ b
a * b = Prim "(*)" (*) :$ a :$ b
abs a = Prim "abs" abs :$ a
signum a = Prim "signum" signum :$ a
true, false :: FunC Bool
true = Lit True
false = Lit False
-- Higher-Order Functions
ex1 :: FunC Int
ex1 = while (<= 100) (*2) 1
-- Evaluation
eval :: FunC a -> a
eval (f :$ a) = eval f $! eval a
eval (Lam f) = eval . f . Value
eval (Lit l) = l
eval If = \c t f -> if c then t else f
eval While = \c b i -> head $ dropWhile c $ iterate b i
eval Pair = (,)
eval Fst = fst
eval Snd = snd
eval (Prim _ f) = f
eval (Value a) = a
eval Arr = \l ixf -> let lm1 = l - 1
in listArray (0,lm1) [ixf i | i <- [0..lm1]]
eval ArrLen = \a -> (1 +) $ uncurry (flip (-)) $ bounds a
eval ArrIx = (!)
eval Sequential = \init step l -> listArray (0,l-1) $ take l $
unfoldr (Just . step) init
eval Return = return
eval Bind = (>>=)
eval NewArray_ = \i -> newArray_ (0,i-1)
eval GetArray = readArray
eval PutArray = writeArray
eval LengthArray = getNumElements
eval FreezeArray = freeze
eval ThawArray = thaw
eval ForM = \l body -> forM_ [0 .. l-1] body
eval WhileM = \cond body -> let loop = do c <- cond
if c
then body >> loop
else return ()
in loop
eval NewRef = newIORef
eval ReadRef = readIORef
eval WriteRef = writeIORef
-- Extensible User Interfaces
class Syntactic a where
type Internal a
toFunC :: a -> FunC (Internal a)
fromFunC :: FunC (Internal a) -> a
instance Syntactic (FunC a) where
type Internal (FunC a) = a
toFunC ast = ast
fromFunC ast = ast
ifC :: Syntactic a => FunC Bool -> a -> a -> a
ifC c t e = fromFunC (If :$ c :$ toFunC t :$ toFunC e)
c ? (t,e) = ifC c t e
-- Pairs
instance (Syntactic a, Syntactic b) => Syntactic (a,b) where
type Internal (a,b) = (Internal a, Internal b)
toFunC (a,b) = Pair :$ toFunC a :$ toFunC b
fromFunC p = (fromFunC (Fst :$ p), fromFunC (Snd :$ toFunC p))
forLoop :: Syntactic s => FunC Int -> s -> (FunC Int -> s -> s) -> s
forLoop len init step = snd $ while (\(i,s) -> i < len)
(\(i,s) -> (i+1, step i s))
(0,init)
gcd :: FunC Int -> FunC Int -> FunC Int
gcd a b = fst $ while (\(a,b) -> a /= b)
(\(a,b) -> a > b ? ( (a-b,b) , (a,b-a) ))
(a,b)
-- Embedding functions
instance (Syntactic a, Syntactic b) => Syntactic (a -> b) where
type Internal (a -> b) = Internal a -> Internal b
toFunC f = Lam (toFunC . f . fromFunC)
fromFunC f = \a -> fromFunC (f :$ toFunC a)
while :: Syntactic s => (s -> FunC Bool) -> (s -> s) -> s -> s
while = fromFunC While
-- Option
data Option a = Option { isSome :: FunC Bool, fromSome :: a }
instance Syntactic a => Syntactic (Option a) where
type Internal (Option a) = (Bool,Internal a)
fromFunC m = Option (fromFunC Fst m) (fromFunC Snd m)
toFunC (Option b a) = fromFunC Pair b a
class Inhabited a where
example :: FunC a
instance Inhabited Bool where example = true
instance Inhabited Int where example = 0
instance Inhabited Float where example = 0
some :: a -> Option a
some a = Option true a
none :: (Syntactic a, Inhabited (Internal a)) => Option a
none = Option false (fromFunC example)
option :: (Syntactic a, Syntactic b) => b -> (a -> b) -> Option a -> b
option noneCase someCase opt = ifC (isSome opt)
(someCase (fromSome opt))
noneCase
instance Functor Option where
fmap f (Option b a) = Option b (f a)
instance Monad Option where
return a = some a
opt >>= k = b { isSome = isSome opt ? (isSome b, false) }
where b = k (fromSome opt)
instance Applicative Option
where
pure = return
(<*>) = ap
resistance :: FunC Float -> FunC Float -> Option (FunC Float)
resistance r1 r2 = do rp1 <- divF 1 r1
rp2 <- divF 1 r2
divF 1 (rp1 + rp2)
divF :: FunC Float -> FunC Float -> Option (FunC Float)
divF a b = b==0 ? (none, some $ div a b)
where
div = fromFunC $ Prim "(/)" (/)
-- Vectors
len :: FunC (Array Int a) -> FunC Int
len = fromFunC ArrLen
(<!>) :: Syntactic a => FunC (Array Int (Internal a)) -> FunC Int -> a
(<!>) = fromFunC ArrIx
data Vector a where
Indexed :: FunC Int -> (FunC Int -> a) -> Vector a
instance Syntactic a => Syntactic (Vector a) where
type Internal (Vector a) = Array Int (Internal a)
toFunC (Indexed l ixf) = fromFunC Arr l ixf
fromFunC arr = Indexed (len arr) (\ix -> arr <!> ix)
zipWithVec :: (Syntactic a, Syntactic b) =>
(a -> b -> c) -> Vector a -> Vector b -> Vector c
zipWithVec f (Indexed l1 ixf1) (Indexed l2 ixf2)
= Indexed (min l1 l2) (\ix -> f (ixf1 ix) (ixf2 ix))
sumVec :: (Syntactic a, Num a) => Vector a -> a
sumVec (Indexed l ixf) = forLoop l 0 (\ix s -> s + ixf ix)
instance Functor Vector where
fmap f (Indexed l ixf) = Indexed l (f . ixf)
scalarProd :: (Syntactic a, Num a) => Vector a -> Vector a -> a
scalarProd a b = sumVec (zipWithVec (*) a b)
-- Rendering the AST
toTreeArgs :: FunC a -> [Tree String] -> State Int (Tree String)
toTreeArgs (f :$ a) as = do
at <- toTreeArgs a []
toTreeArgs f (at:as)
toTreeArgs (Lam f) as = do
v <- get; put (v+1)
let var = Variable ('v' : show v)
body <- toTreeArgs (f var) []
return $ case as of
[] -> Node ("Lam v" Prelude.++ show v) [body]
_ -> Node (":$") (body:as)
toTreeArgs (Variable v) as = return $ Node v as
toTreeArgs sym as = return $ Node (showSym sym) as
where
showSym :: FunC a -> String
showSym (Lit a) = show a
showSym If = "If"
showSym While = "While"
showSym Pair = "Pair"
showSym Fst = "Fst"
showSym Snd = "Snd"
showSym (Prim f _) = f
showSym Arr = "Arr"
showSym ArrLen = "ArrLen"
showSym ArrIx = "ArrIx"
showSym Sequential = "Sequential"
showSym Return = "Return"
showSym Bind = "Bind"
showSym NewArray_ = "NewArray_"
showSym GetArray = "GetArray"
showSym PutArray = "PutArray"
showSym LengthArray = "LengthArray"
showSym FreezeArray = "FreezeArray"
showSym ThawArray = "ThawArray"
showSym ForM = "ForM"
showSym WhileM = "WhileM"
showSym NewRef = "NewRef"
showSym ReadRef = "ReadRef"
showSym WriteRef = "WriteRef"
drawAST :: Syntactic a => a -> IO ()
drawAST = drawTree . toTree . toFunC
toTree :: FunC a -> Tree String
toTree a = evalState (toTreeArgs a []) 0
-- Fusion
memorize :: Syntactic a => Vector a -> Vector a
memorize (Indexed l ixf) = Indexed l (\n -> fromFunC Arr l ixf <!> n)
scalarProdMem :: (Syntactic a, Num a) => Vector a -> Vector a -> a
scalarProdMem a b = sumVec (memorize (zipWithVec (*) a b))
-- Sequential Vectors
scanVec :: Syntactic a => (a -> b -> a) -> a -> Vector b -> Vector a
scanVec f z (Indexed l ixf) = Indexed (l+1) ixf'
where ixf' i = forLoop (i-1) z $ \j s ->
f s (ixf j)
data Seq a = forall s . Syntactic s => Seq s (s -> (a,s)) (FunC Int)
scanSeq :: Syntactic a => (a -> b -> a) -> a -> Seq b -> Seq a
scanSeq f z (Seq init step l) = Seq init' step' (l+1)
where init' = (z,init)
step' (a,s) = let (b,s') = step s
in (a,(f a b,s'))
vecToSeq :: Vector a -> Seq a
vecToSeq (Indexed l ixf) = Seq 0 step l
where step i = (ixf i, i+1)
-- Monads
data Mon m a = M { unM :: forall b . ((a -> FunC (m b)) -> FunC (m b)) }
instance Monad m => Monad (Mon m) where
return a = M $ \k -> k a
M m >>= f = M $ \k -> m (\a -> unM (f a) k)
instance Monad m => Functor (Mon m) where
fmap f m = m >>= return . f
instance Monad m => Applicative (Mon m)
where
pure = return
(<*>) = ap
instance (Monad m, Syntactic a) => Syntactic (Mon m a) where
type Internal (Mon m a) = m (Internal a)
toFunC (M m) = m (fromFunC Return)
fromFunC m = M $ \k -> fromFunC Bind m k
type M a = Mon IO a
type MArr a = FunC (IOArray Int a)
newArray :: FunC Int -> M (MArr a)
getArray :: MArr a -> FunC Int -> M (FunC a)
putArray :: MArr a -> FunC Int -> FunC a -> M ()
lengthArray :: MArr a -> M (FunC Int)
freezeArray :: MArr a -> M (FunC (Array Int a))
thawArray :: FunC (Array Int a) -> M (MArr a)
newArray = fromFunC NewArray_
getArray = fromFunC GetArray
putArray = fromFunC PutArray
lengthArray = fromFunC LengthArray
freezeArray = fromFunC FreezeArray
thawArray = fromFunC ThawArray
forM :: Monad m => FunC Int -> FunC Int -> (FunC Int -> Mon m ()) -> Mon m ()
forM start stop body = fromFunC ForM (stop - start) (body . (+start))
whileM :: Monad m => Mon m (FunC Bool) -> Mon m () -> Mon m ()
whileM = fromFunC WhileM
-- | Generate code for an in-place insertion sort of the first @l@
-- elements of @arr@: each element is shifted left past all larger
-- predecessors.
--
-- Bug fix: the loop condition used to read @arr[jv]@ unconditionally.
-- Since that read is a monadic action it runs *before* the @jv >= 0@
-- test, so inserting an element smaller than the whole sorted prefix
-- read index @-1@ and crashed at run time.  The index is now guarded
-- with 'ifC'; the dummy read of index 0 when @jv < 0@ is in bounds and
-- its value is discarded by the short-circuiting condition.
insertionSort :: Ord a => FunC Int -> MArr a -> M ()
insertionSort l arr = do
  forM 1 l $ \i -> do
    value <- getArray arr i -- element to insert into the sorted prefix
    j <- newRef (i-1)
    let cond = do jv <- readRef j
                  aj <- getArray arr (ifC (jv >= 0) jv 0)
                  return (jv >= 0 && aj > value)
    whileM cond $ do
      jv <- readRef j
      a <- getArray arr jv
      putArray arr (jv+1) a
      writeRef j (jv-1)
    jv <- readRef j
    putArray arr (jv+1) value
type Ref a = FunC (IORef a)
newRef :: FunC a -> M (Ref a)
newRef = fromFunC NewRef
readRef :: Ref a -> M (FunC a)
readRef = fromFunC ReadRef
writeRef :: Ref a -> FunC a -> M ()
writeRef = fromFunC WriteRef
-- Push vectors
data Push a = Push ((FunC Int -> a -> M ()) -> M ()) (FunC Int)
-- | Push vector of the integers in the half-open range [start, stop).
-- NOTE(review): elements are written at indices start..stop-1 while the
-- declared length is (stop - start), so for start /= 0 the write
-- indices exceed the length -- confirm this is intended.
enum :: FunC Int -> FunC Int -> Push (FunC Int)
enum start stop = Push write (stop - start)
  where
    write emit = forM start stop (\i -> emit i i)
-- | Concatenate two push vectors: the first writes at its own indices,
-- and the second's indices are offset by the first's length.
(++) :: Push a -> Push a -> Push a
Push write1 len1 ++ Push write2 len2 = Push write (len1 + len2)
  where
    write emit = do
      write1 emit
      write2 (\i a -> emit (i + len1) a)
-- | Duplicate a push vector: every element is emitted twice, once at
-- its original index and once at that index offset by the original
-- length, so the result has twice the length.
dup :: Push a -> Push a
dup (Push write len) = Push duplicated (2 * len)
  where
    duplicated emit = write (\i a -> emit i a >> emit (i + len) a)
-- | Materialise a push vector into an immutable array: allocate a
-- mutable array of the right length, run the filler with array writes
-- as the callback, then freeze the result.
store :: Push (FunC a) -> M (FunC (Array Int a))
store (Push write len) =
  newArray len >>= \arr -> write (putArray arr) >> freezeArray arr
-- Mutable data structures
-- | Interface to a mutable circular buffer: 'indexBuf' reads an
-- element by offset and 'putBuf' appends a new element.
data Buffer a = Buffer
  { indexBuf :: FunC Int -> M a   -- ^ read by offset (see 'initBuffer')
  , putBuf   :: a -> M ()         -- ^ append, overwriting the oldest slot
  }
-- | Create a circular buffer initialised with the contents of @vec@.
-- The buffer's capacity is the vector's length; 'putBuf' overwrites the
-- oldest slot, and @indexBuf j@ returns the element written j steps ago
-- (j = 0 is the most recent).
initBuffer :: forall a . Syntactic a => Vector a -> M (Buffer a)
initBuffer vec = do
    buf <- thawArray (toFunC vec)
    l <- lengthArray buf
    -- ir holds the index of the next slot to write.
    ir <- newRef 0
    let get j = do
          i <- readRef ir
          fmap fromFunC $ getArray buf $ calcIndex l i j
        put a = do
          i <- readRef ir
          -- Advance the write cursor, wrapping at the capacity.
          writeRef ir ((i+1) `mod` l)
          putArray buf i $ toFunC a
    return (Buffer get put)
    where
    -- Physical index of the element j steps behind write cursor i,
    -- in a buffer of capacity l (most recent write is at i-1).
    calcIndex l i j = (l+i-j-1) `mod` l
-- | Fibonacci via a two-element circular buffer holding the latest two
-- values: each iteration appends their sum, and the answer is the most
-- recently written element.
fib :: FunC Int -> M (FunC Int)
fib n = do
  let twoOnes = Indexed 2 $ \_ -> 1 -- Initial buffer [1,1]
  buf <- initBuffer twoOnes
  forM 1 n $ \_ -> do
    a <- indexBuf buf 0  -- most recent value
    b <- indexBuf buf 1  -- the one before it
    putBuf buf (a+b)
  indexBuf buf 0
-- | Run the embedded 'fib' at input 10 via the interpreter.
testFib = eval (toFunC fib) 10
-- | The Fibonacci sequence as an infinite lazy list, usable as a
-- reference for 'testFib'.
fibList = 1 : 1 : zipWith (+) fibList (tail fibList)
| josefs/deep-shallow-paper | DeepShallow.hs | mit | 14,701 | 0 | 20 | 4,460 | 6,987 | 3,543 | 3,444 | -1 | -1 |
module Physics.RigidBody2d.Forces where
import Algebra.Vector as Vector
import Data.List as List
import Geometry.Vector2d as Vector2d
import Physics.Dynamics as Dynamics
import Physics.RigidBody2d as RigidBody2d
import Prelude.Extensions as PreludeExt
linearForce = \rigid_body direction acceleration -> let
mass = (RigidBody2d.mass rigid_body)
position = (RigidBody2d.position rigid_body)
in (position, Dynamics.directedForce mass (Vector2d.fromAngle direction) acceleration)
relativeLinearForce = \rigid_body direction acceleration -> let
in (linearForce rigid_body ((+) (orientation rigid_body) direction) acceleration)
spinForces = \rigid_body acceleration -> let
position = (RigidBody2d.position rigid_body)
forward_axis = (Vector2d.fromAngle (orientation rigid_body))
right_axis = (Vector2d.rotate forward_axis (toRational ((/) (Prelude.negate pi) 2)))
forward_force = (directedForce (inertiaMoment rigid_body) forward_axis ((/) acceleration 2))
left_force = ((Vector.subtract position right_axis), (Vector.negate forward_force))
right_force = ((Vector.add position right_axis), forward_force)
in [left_force, right_force]
linearFrictionForce = \rigid_body acceleration time -> let
velocity = (RigidBody2d.velocity rigid_body)
decceleration = (Prelude.negate (Dynamics.frictionAcceleration (Vector.length velocity) acceleration time))
in (linearForce rigid_body (Vector2d.toAngle velocity) decceleration)
spinFrictionForces = \rigid_body acceleration time -> let
velocity = (RigidBody2d.angularVelocity rigid_body)
decceleration = (Prelude.negate ((*) (signum velocity) (Dynamics.frictionAcceleration (abs velocity) acceleration time)))
in (spinForces rigid_body decceleration)
frictionForces = \body friction angular_friction timestep -> let
forward_friction = (linearFrictionForce body friction timestep)
spin_friction = (spinFrictionForces body angular_friction timestep)
in ((:) forward_friction spin_friction)
unlimitedLinearForces = \rigid_body max_velocity forces -> let
velocity = (RigidBody2d.velocity rigid_body)
withinMax = \(position, force) -> (isUnlimitedForce velocity max_velocity force)
in (List.filter withinMax forces)
unlimitedSpinForces = \rigid_body max_velocity forces -> let
velocity = (RigidBody2d.angularVelocity rigid_body)
withinMax = \(position, force) -> let
less_than_max = ((<) (abs velocity) max_velocity)
direction = (Vector.subtract position (RigidBody2d.position rigid_body))
is_opposite = ((/=) (signum velocity) (signum (crossProduct direction force)))
in ((||) less_than_max is_opposite)
in (List.filter withinMax forces)
| stevedonnelly/haskell | code/Physics/RigidBody2d/Forces.hs | mit | 2,712 | 0 | 20 | 403 | 796 | 433 | 363 | 45 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.