_id stringlengths 64 64 | repository stringlengths 6 84 | name stringlengths 4 110 | content stringlengths 0 248k | license null | download_url stringlengths 89 454 | language stringclasses 7 values | comments stringlengths 0 74.6k | code stringlengths 0 248k |
|---|---|---|---|---|---|---|---|---|
62ffd228a5a50076e40b4748b22b695e60621547b3fb6b5b8627a036d71c16ea | reborg/fluorine | fluorine.clj | (ns net.reborg.fluorine
(:require
[net.reborg.fluorine.bootstrap :refer [system]]
[net.reborg.fluorine.fs :as fs]
[clojure.tools.logging :as log]
[compojure.core :as compojure :refer [GET]]
[ring.middleware.params :as params]
[compojure.route :as route]
[aleph.http :as http]
[net.reborg.fluorine.config :as c]
[net.reborg.fluorine.bus :as bus]
[net.reborg.fluorine.watcher :as watcher]
[net.reborg.fluorine.data :as data]
[manifold.stream :as s]
[manifold.deferred :as d]
))
(def non-websocket-request
{:status 400
:headers {"content-type" "application/text"}
:body "Expected a websocket request."})
(defn- push-config!
"Send current config reading down a connected client."
[conn path]
(s/put! conn (data/marshall (fs/read path))))
(defn- register-client [conn ip path]
(bus/subscribe! conn ip path)
(s/consume #(log/info (format "%s %s" % ip)) conn)
(s/on-closed conn #(log/warn (format "closed connection from %s" ip)))
(watcher/register! ip path)
(push-config! conn path)
{:connected true})
(defn connection-handler
[path {:keys [remote-addr] :as req}]
(d/let-flow [conn (d/catch (http/websocket-connection req) (constantly nil))]
(if conn
(register-client conn remote-addr path)
non-websocket-request)))
(def handler
(params/wrap-params
(compojure/routes
(GET "*" {{path :*} :params} (partial connection-handler path))
(route/not-found "No such page."))))
(defn- debug
"Just here for a one off send of a sample config down connected clients."
[]
@(s/put! (:changes system) {:channel "apps/clj-fe" :msg (data/marshall (vary-meta {:a "hello"} assoc :format :edn))}))
| null | https://raw.githubusercontent.com/reborg/fluorine/58d533646adce8537ca5c57692a0a998f06e1d25/src/net/reborg/fluorine.clj | clojure | (ns net.reborg.fluorine
(:require
[net.reborg.fluorine.bootstrap :refer [system]]
[net.reborg.fluorine.fs :as fs]
[clojure.tools.logging :as log]
[compojure.core :as compojure :refer [GET]]
[ring.middleware.params :as params]
[compojure.route :as route]
[aleph.http :as http]
[net.reborg.fluorine.config :as c]
[net.reborg.fluorine.bus :as bus]
[net.reborg.fluorine.watcher :as watcher]
[net.reborg.fluorine.data :as data]
[manifold.stream :as s]
[manifold.deferred :as d]
))
(def non-websocket-request
{:status 400
:headers {"content-type" "application/text"}
:body "Expected a websocket request."})
(defn- push-config!
"Send current config reading down a connected client."
[conn path]
(s/put! conn (data/marshall (fs/read path))))
(defn- register-client [conn ip path]
(bus/subscribe! conn ip path)
(s/consume #(log/info (format "%s %s" % ip)) conn)
(s/on-closed conn #(log/warn (format "closed connection from %s" ip)))
(watcher/register! ip path)
(push-config! conn path)
{:connected true})
(defn connection-handler
[path {:keys [remote-addr] :as req}]
(d/let-flow [conn (d/catch (http/websocket-connection req) (constantly nil))]
(if conn
(register-client conn remote-addr path)
non-websocket-request)))
(def handler
(params/wrap-params
(compojure/routes
(GET "*" {{path :*} :params} (partial connection-handler path))
(route/not-found "No such page."))))
(defn- debug
"Just here for a one off send of a sample config down connected clients."
[]
@(s/put! (:changes system) {:channel "apps/clj-fe" :msg (data/marshall (vary-meta {:a "hello"} assoc :format :edn))}))
| |
ec7b993eff2da18e49c4f77c38df7472d40d9c91044f4a3e8c78196e66e70996 | swannodette/cljs-bootstrap | build.clj | (require '[cljs.build.api :as b])
(b/build "src/node"
{:output-to "main.js"
:target :nodejs
:optimizations :simple
:cache-analysis true
:static-fns true
:optimize-constants true
:verbose true})
(System/exit 0) | null | https://raw.githubusercontent.com/swannodette/cljs-bootstrap/1cdb45c7c4422cc49215860e77b56d2f273059b7/script/build.clj | clojure | (require '[cljs.build.api :as b])
(b/build "src/node"
{:output-to "main.js"
:target :nodejs
:optimizations :simple
:cache-analysis true
:static-fns true
:optimize-constants true
:verbose true})
(System/exit 0) | |
d8631337375928d9ee25bb97db1350ab9b843c089f1dcdf2b4f02824a362229f | luminus-framework/examples | core.clj | (ns multi-client-ws-http-kit.core
(:require
[multi-client-ws-http-kit.handler :as handler]
[multi-client-ws-http-kit.nrepl :as nrepl]
[luminus.http-server :as http]
[multi-client-ws-http-kit.config :refer [env]]
[clojure.tools.cli :refer [parse-opts]]
[clojure.tools.logging :as log]
[mount.core :as mount])
(:gen-class))
;; log uncaught exceptions in threads
(Thread/setDefaultUncaughtExceptionHandler
(reify Thread$UncaughtExceptionHandler
(uncaughtException [_ thread ex]
(log/error {:what :uncaught-exception
:exception ex
:where (str "Uncaught exception on" (.getName thread))}))))
(def cli-options
[["-p" "--port PORT" "Port number"
:parse-fn #(Integer/parseInt %)]])
(mount/defstate ^{:on-reload :noop} http-server
:start
(http/start
(-> env
(assoc :handler (handler/app))
(update :io-threads #(or % (* 2 (.availableProcessors (Runtime/getRuntime)))))
(update :port #(or (-> env :options :port) %))))
:stop
(http/stop http-server))
(mount/defstate ^{:on-reload :noop} repl-server
:start
(when (env :nrepl-port)
(nrepl/start {:bind (env :nrepl-bind)
:port (env :nrepl-port)}))
:stop
(when repl-server
(nrepl/stop repl-server)))
(defn stop-app []
(doseq [component (:stopped (mount/stop))]
(log/info component "stopped"))
(shutdown-agents))
(defn start-app [args]
(doseq [component (-> args
(parse-opts cli-options)
mount/start-with-args
:started)]
(log/info component "started"))
(.addShutdownHook (Runtime/getRuntime) (Thread. stop-app)))
(defn -main [& args]
(start-app args))
| null | https://raw.githubusercontent.com/luminus-framework/examples/cbeee2fef8f457a6a6bac2cae0b640370ae2499b/multi-client-ws-http-kit/src/clj/multi_client_ws_http_kit/core.clj | clojure | log uncaught exceptions in threads | (ns multi-client-ws-http-kit.core
(:require
[multi-client-ws-http-kit.handler :as handler]
[multi-client-ws-http-kit.nrepl :as nrepl]
[luminus.http-server :as http]
[multi-client-ws-http-kit.config :refer [env]]
[clojure.tools.cli :refer [parse-opts]]
[clojure.tools.logging :as log]
[mount.core :as mount])
(:gen-class))
(Thread/setDefaultUncaughtExceptionHandler
(reify Thread$UncaughtExceptionHandler
(uncaughtException [_ thread ex]
(log/error {:what :uncaught-exception
:exception ex
:where (str "Uncaught exception on" (.getName thread))}))))
(def cli-options
[["-p" "--port PORT" "Port number"
:parse-fn #(Integer/parseInt %)]])
(mount/defstate ^{:on-reload :noop} http-server
:start
(http/start
(-> env
(assoc :handler (handler/app))
(update :io-threads #(or % (* 2 (.availableProcessors (Runtime/getRuntime)))))
(update :port #(or (-> env :options :port) %))))
:stop
(http/stop http-server))
(mount/defstate ^{:on-reload :noop} repl-server
:start
(when (env :nrepl-port)
(nrepl/start {:bind (env :nrepl-bind)
:port (env :nrepl-port)}))
:stop
(when repl-server
(nrepl/stop repl-server)))
(defn stop-app []
(doseq [component (:stopped (mount/stop))]
(log/info component "stopped"))
(shutdown-agents))
(defn start-app [args]
(doseq [component (-> args
(parse-opts cli-options)
mount/start-with-args
:started)]
(log/info component "started"))
(.addShutdownHook (Runtime/getRuntime) (Thread. stop-app)))
(defn -main [& args]
(start-app args))
|
eefd5682741280ca4261b8c13b9611977db949cc986811b121122ae9b38256fb | alanz/ghc-exactprint | overloadedrecfldsfail08.hs | # LANGUAGE DuplicateRecordFields , TypeFamilies #
data family F a
data instance F Int = MkFInt { x :: Int }
data instance F Bool = MkFBool { y :: Bool }
-- No data type has both these fields, but they belong to the same
lexical parent ( F ) . This used to confuse DuplicateRecordFields .
foo e = e { x = 3, y = True }
main = return ()
| null | https://raw.githubusercontent.com/alanz/ghc-exactprint/b6b75027811fa4c336b34122a7a7b1a8df462563/tests/examples/ghc80/overloadedrecfldsfail08.hs | haskell | No data type has both these fields, but they belong to the same | # LANGUAGE DuplicateRecordFields , TypeFamilies #
data family F a
data instance F Int = MkFInt { x :: Int }
data instance F Bool = MkFBool { y :: Bool }
lexical parent ( F ) . This used to confuse DuplicateRecordFields .
foo e = e { x = 3, y = True }
main = return ()
|
247feec79c0cc9c1a478dc03c81e2110a66cdea8b3c6e6f3a2fd2710c26ab991 | wdebeaum/step | whatsoever.lisp | ;;;;
;;;; W::whatsoever
;;;;
(define-words :pos W::adj :templ CENTRAL-ADJ-TEMPL
:words (
;; negative polarity
(W::whatsoever
(SENSES
;; primarily negative polarity
;; positive polarity: "you can have anything whatsoever" "anything whatsoever will fit in there"
((LF-PARENT ONT::least-extent)
(templ postpositive-adj-templ)
(meta-data :origin cardiac :entry-date 20090120 :change-date nil :comments LM-vocab)
(example "none whatsoever" "no plan whatsoever")
)
)
)
))
| null | https://raw.githubusercontent.com/wdebeaum/step/f38c07d9cd3a58d0e0183159d4445de9a0eafe26/src/LexiconManager/Data/new/whatsoever.lisp | lisp |
W::whatsoever
negative polarity
primarily negative polarity
positive polarity: "you can have anything whatsoever" "anything whatsoever will fit in there" |
(define-words :pos W::adj :templ CENTRAL-ADJ-TEMPL
:words (
(W::whatsoever
(SENSES
((LF-PARENT ONT::least-extent)
(templ postpositive-adj-templ)
(meta-data :origin cardiac :entry-date 20090120 :change-date nil :comments LM-vocab)
(example "none whatsoever" "no plan whatsoever")
)
)
)
))
|
ef3b91f1c7a35851ed7d30c3c53bf40741cbfa9213792882fc3dcdde26467412 | ocramz/decision-trees | Histogram.hs | # language DeriveFunctor , DeriveFoldable #
module Data.Histogram (Histogram , mkHistogram, getHistogram, normalize, entropy) where
import qualified Data.Foldable as F
import Data.Monoid (Sum(..))
import qualified Data.IntMap as IM
import qualified Data.Map.Strict as M
import Control.Arrow ((&&&))
data Histogram k a = Histogram {
binFunction :: a -> k
, unHistogram :: M.Map k (Count [a]) }
instance (Eq k, Eq a) => Eq (Histogram k a) where
h1 == h2 = unHistogram h1 == unHistogram h2
instance (Show k, Show a) => Show (Histogram k a) where
show h = show (unHistogram h)
-- | Populate a Histogram given a quantization function and a Foldable of data
mkHistogram :: (Ord k, Foldable t) => (a -> k) -> t a -> Histogram k a
mkHistogram kf = addToHistogram (empty kf)
| counts for a Histogram
getHistogram :: Histogram k a -> M.Map k Int
getHistogram h = getSum . getCount <$> unHistogram h
getNElems :: Histogram k a -> Int
getNElems = sum . getHistogram
-- | Compute a distribution from a Histogram
normalize :: Fractional p => Histogram k a -> M.Map k p
normalize hh = f <$> ns
where
ns = fromIntegral <$> getHistogram hh
n = fromIntegral $ getNElems hh
f x = x / n
entropy :: Floating h => Histogram k a -> h
entropy h = sum $ fmap (\p -> p * logBase 2 p) ps
where
ps = normalize h
getBins :: Functor t => t (Count a) -> t a
getBins = fmap getCountItems
* A packaged Histogram type
| To construct a histogram we only need a quantization function ( i.e. that decides in which does an element fall into )
empty :: (a -> k) -> Histogram k a
empty kf = Histogram kf M.empty
addToHistogram :: (Ord k, Foldable t) => Histogram k a -> t a -> Histogram k a
addToHistogram h0 xs = Histogram kf (M.union hm0 hm1) where
(Histogram kf hm0) = h0
hm1 = M.fromListWith (<>) $ map (kf &&& mkCount1) $ F.toList xs
-- | Count monoid
data Count a = Count { getCount :: !(Sum Int), getCountItems :: a } deriving (Eq, Show, Functor, Foldable)
instance Semigroup a => Semigroup (Count a) where
(Count n xs) <> (Count m ys) = Count (n <> m) (xs <> ys)
instance Monoid a => Monoid (Count a) where
mempty = Count (Sum 0) mempty
mkCount1 :: a -> Count [a]
mkCount1 x = Count 1 [x]
| null | https://raw.githubusercontent.com/ocramz/decision-trees/a94f46cebeb715770cdd4d9b614f51bf0b1096c5/src/Data/Histogram.hs | haskell | | Populate a Histogram given a quantization function and a Foldable of data
| Compute a distribution from a Histogram
| Count monoid | # language DeriveFunctor , DeriveFoldable #
module Data.Histogram (Histogram , mkHistogram, getHistogram, normalize, entropy) where
import qualified Data.Foldable as F
import Data.Monoid (Sum(..))
import qualified Data.IntMap as IM
import qualified Data.Map.Strict as M
import Control.Arrow ((&&&))
data Histogram k a = Histogram {
binFunction :: a -> k
, unHistogram :: M.Map k (Count [a]) }
instance (Eq k, Eq a) => Eq (Histogram k a) where
h1 == h2 = unHistogram h1 == unHistogram h2
instance (Show k, Show a) => Show (Histogram k a) where
show h = show (unHistogram h)
mkHistogram :: (Ord k, Foldable t) => (a -> k) -> t a -> Histogram k a
mkHistogram kf = addToHistogram (empty kf)
| counts for a Histogram
getHistogram :: Histogram k a -> M.Map k Int
getHistogram h = getSum . getCount <$> unHistogram h
getNElems :: Histogram k a -> Int
getNElems = sum . getHistogram
normalize :: Fractional p => Histogram k a -> M.Map k p
normalize hh = f <$> ns
where
ns = fromIntegral <$> getHistogram hh
n = fromIntegral $ getNElems hh
f x = x / n
entropy :: Floating h => Histogram k a -> h
entropy h = sum $ fmap (\p -> p * logBase 2 p) ps
where
ps = normalize h
getBins :: Functor t => t (Count a) -> t a
getBins = fmap getCountItems
* A packaged Histogram type
| To construct a histogram we only need a quantization function ( i.e. that decides in which does an element fall into )
empty :: (a -> k) -> Histogram k a
empty kf = Histogram kf M.empty
addToHistogram :: (Ord k, Foldable t) => Histogram k a -> t a -> Histogram k a
addToHistogram h0 xs = Histogram kf (M.union hm0 hm1) where
(Histogram kf hm0) = h0
hm1 = M.fromListWith (<>) $ map (kf &&& mkCount1) $ F.toList xs
data Count a = Count { getCount :: !(Sum Int), getCountItems :: a } deriving (Eq, Show, Functor, Foldable)
instance Semigroup a => Semigroup (Count a) where
(Count n xs) <> (Count m ys) = Count (n <> m) (xs <> ys)
instance Monoid a => Monoid (Count a) where
mempty = Count (Sum 0) mempty
mkCount1 :: a -> Count [a]
mkCount1 x = Count 1 [x]
|
00304f4c8b3104292f001f7e01ba7553589c7c1452a7f25cee12ede147f217ba | viercc/matchable | Matchable.hs | # LANGUAGE EmptyCase #
# LANGUAGE FlexibleContexts #
# LANGUAGE TypeFamilies #
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE DeriveFunctor #-}
# LANGUAGE UndecidableInstances #
module Data.Matchable(
-- * Matchable class
Matchable(..),
zipzipMatch,
fmapRecovered,
eqDefault,
liftEqDefault,
* Define Matchable by Generic
Matchable'(), genericZipMatchWith,
) where
import Control.Applicative
import Data.Functor.Classes ( Eq1 )
import Data.Functor.Classes.Orphans ()
import Data.Maybe (fromMaybe, isJust)
import Data.Foldable
import Data.Functor.Identity
import Data.Functor.Compose
import Data.Functor.Product
import Data.Functor.Sum
import Data.Tagged
import Data.Proxy
import Data.List.NonEmpty (NonEmpty)
import Data.Map.Lazy (Map)
import qualified Data.Map.Lazy as Map
import Data.IntMap.Lazy (IntMap)
import qualified Data.IntMap.Lazy as IntMap
import qualified Data.IntMap.Merge.Lazy as IntMap
import Data.Tree (Tree)
import Data.Sequence (Seq)
import qualified Data.Sequence as Seq
import Data.Vector (Vector)
import qualified Data.Vector as Vector
import Data.Hashable (Hashable)
import Data.HashMap.Lazy (HashMap)
import qualified Data.HashMap.Lazy as HashMap
import GHC.Generics
( Generic1(..),
V1,
U1(..),
Par1(Par1),
Rec1(Rec1),
K1(K1),
M1(M1),
type (:+:)(..),
type (:*:)(..),
type (:.:)(Comp1) )
import GHC.Generics.Generically ( Generically1(..) )
-- $setup
-- This is required to silence "type defaults" warning, which clutters GHCi
-- output and makes doctests fail.
-- >>> :set -Wno-type-defaults
| Containers that allows exact structural matching of two containers .
class (Eq1 t, Functor t) => Matchable t where
|
Decides if two structures match exactly . If they match , return zipped version of them .
> ta tb = Just tab
holds if and only if both of
> ta = fmap fst tab
> tb = fmap snd tab
holds . Otherwise , @zipMatch ta tb = Nothing@.
For example , the type signature of @zipMatch@ on the list reads as follows :
> : : [ a ] - > [ b ] - > Maybe [ ( a , b ) ]
@zipMatch as bs@ returns @Just ( zip as bs)@ if the lengths of two given lists are
same , and returns @Nothing@ otherwise .
= = = = Example
> > > [ 1 , 2 , 3 ] [ ' a ' , ' b ' , ' c ' ]
Just [ ( 1,'a'),(2,'b'),(3,'c ' ) ]
> > > [ 1 , 2 , 3 ] [ ' a ' , ' b ' ]
Nothing
Decides if two structures match exactly. If they match, return zipped version of them.
> zipMatch ta tb = Just tab
holds if and only if both of
> ta = fmap fst tab
> tb = fmap snd tab
holds. Otherwise, @zipMatch ta tb = Nothing@.
For example, the type signature of @zipMatch@ on the list Functor @[]@ reads as follows:
> zipMatch :: [a] -> [b] -> Maybe [(a,b)]
@zipMatch as bs@ returns @Just (zip as bs)@ if the lengths of two given lists are
same, and returns @Nothing@ otherwise.
==== Example
>>> zipMatch [1, 2, 3] ['a', 'b', 'c']
Just [(1,'a'),(2,'b'),(3,'c')]
>>> zipMatch [1, 2, 3] ['a', 'b']
Nothing
-}
zipMatch :: t a -> t b -> Maybe (t (a,b))
zipMatch = zipMatchWith (curry Just)
|
Match two structures . If they match , zip them with given function
@(a - > b - > Maybe c)@. Passed function can make whole match fail
by returning @Nothing@.
A definition of ' zipMatchWith ' must satisfy :
* If there is a pair @(tab , tc)@ such that fulfills all following three conditions ,
then @zipMatchWith f ta tb = Just tc@.
1 . @ta = fmap fst tab@
2 . @tb = fmap snd tab@
3 . ( uncurry f ) tab = fmap Just tc@
* If there are no such pair , @zipMatchWith f ta tb = Nothing@.
If @t@ is also ' ' , the last condition can be dropped and
the equation can be stated without using @tc@.
> zipMatchWith f ta tb = traverse ( uncurry f ) tab
@zipMatch@ can be defined in terms of @zipMatchWith@.
And if @t@ is also @Traversable@ , @zipMatchWith@ can be defined in terms of @zipMatch@.
When you implement both of them by hand , keep their relation in the way
the default implementation is .
> = zipMatchWith ( curry pure )
> zipMatchWith f ta tb = zipMatch ta tb > > = traverse ( uncurry f )
Match two structures. If they match, zip them with given function
@(a -> b -> Maybe c)@. Passed function can make whole match fail
by returning @Nothing@.
A definition of 'zipMatchWith' must satisfy:
* If there is a pair @(tab, tc)@ such that fulfills all following three conditions,
then @zipMatchWith f ta tb = Just tc@.
1. @ta = fmap fst tab@
2. @tb = fmap snd tab@
3. @fmap (uncurry f) tab = fmap Just tc@
* If there are no such pair, @zipMatchWith f ta tb = Nothing@.
If @t@ is also 'Traversable', the last condition can be dropped and
the equation can be stated without using @tc@.
> zipMatchWith f ta tb = traverse (uncurry f) tab
@zipMatch@ can be defined in terms of @zipMatchWith@.
And if @t@ is also @Traversable@, @zipMatchWith@ can be defined in terms of @zipMatch@.
When you implement both of them by hand, keep their relation in the way
the default implementation is.
> zipMatch = zipMatchWith (curry pure)
> zipMatchWith f ta tb = zipMatch ta tb >>= traverse (uncurry f)
-}
zipMatchWith :: (a -> b -> Maybe c) -> t a -> t b -> Maybe (t c)
# MINIMAL zipMatchWith #
| > zipzipMatch = zipMatchWith
zipzipMatch
:: (Matchable t, Matchable u)
=> t (u a)
-> t (u b)
-> Maybe (t (u (a, b)))
zipzipMatch = zipMatchWith zipMatch
| @Matchable t@ implies @Functor t@.
-- It is not recommended to implement @fmap@ through this function,
-- so it is named @fmapRecovered@ but not @fmapDefault@.
fmapRecovered :: (Matchable t) => (a -> b) -> t a -> t b
fmapRecovered f ta =
fromMaybe (error "Law-violating Matchable instance") $
zipMatchWith (\a _ -> Just (f a)) ta ta
-- | @Matchable t@ implies @Eq a => Eq (t a)@.
eqDefault :: (Matchable t, Eq a) => t a -> t a -> Bool
eqDefault = liftEqDefault (==)
-- | @Matchable t@ implies @Eq1 t@.
liftEqDefault :: (Matchable t) => (a -> b -> Bool) -> t a -> t b -> Bool
liftEqDefault eq tx ty =
let u x y = if x `eq` y then Just () else Nothing
in isJust $ zipMatchWith u tx ty
-----------------------------------------------
instance Matchable Identity where
zipMatchWith = genericZipMatchWith
instance (Eq k) => Matchable (Const k) where
zipMatchWith = genericZipMatchWith
instance (Matchable f, Matchable g) => Matchable (Product f g) where
zipMatchWith = genericZipMatchWith
instance (Matchable f, Matchable g) => Matchable (Sum f g) where
zipMatchWith = genericZipMatchWith
instance (Matchable f, Matchable g) => Matchable (Compose f g) where
zipMatchWith = genericZipMatchWith
instance Matchable Proxy where
zipMatchWith _ _ _ = Just Proxy
instance Matchable (Tagged t) where
zipMatchWith = genericZipMatchWith
instance Matchable Maybe where
zipMatchWith = genericZipMatchWith
instance Matchable [] where
zipMatchWith = genericZipMatchWith
instance Matchable NonEmpty where
zipMatchWith = genericZipMatchWith
instance (Eq e) => Matchable ((,) e) where
zipMatchWith = genericZipMatchWith
instance (Eq e) => Matchable (Either e) where
zipMatchWith = genericZipMatchWith
instance Matchable Seq where
zipMatch as bs
| Seq.length as == Seq.length bs = Just (Seq.zip as bs)
| otherwise = Nothing
zipMatchWith u as bs
| Seq.length as == Seq.length bs = unsafeFillIn u as (Data.Foldable.toList bs)
| otherwise = Nothing
instance (Eq k) => Matchable (Map k) where
zipMatchWith u as bs
| Map.size as == Map.size bs =
Map.fromDistinctAscList <$>
zipMatchWith (zipMatchWith u) (Map.toAscList as) (Map.toAscList bs)
| otherwise = Nothing
instance Matchable IntMap where
zipMatchWith u as bs
| IntMap.size as == IntMap.size bs = merger as bs
| otherwise = Nothing
where
miss = IntMap.traverseMissing (\_ _ -> Nothing)
merger = IntMap.mergeA miss miss (IntMap.zipWithAMatched (const u))
instance Matchable Tree where
zipMatchWith = genericZipMatchWith
instance Matchable Vector where
zipMatch as bs
| Vector.length as == Vector.length bs = Just (Vector.zip as bs)
| otherwise = Nothing
zipMatchWith u as bs
| Vector.length as == Vector.length bs = Vector.zipWithM u as bs
| otherwise = Nothing
instance (Eq k, Hashable k) => Matchable (HashMap k) where
zipMatch as bs
| HashMap.size as == HashMap.size bs =
HashMap.traverseWithKey (\k a -> (,) a <$> HashMap.lookup k bs) as
| otherwise = Nothing
zipMatchWith u as bs
| HashMap.size as == HashMap.size bs =
HashMap.traverseWithKey (\k a -> u a =<< HashMap.lookup k bs) as
| otherwise = Nothing
instance (Generic1 f, Matchable' (Rep1 f)) => Matchable (Generically1 f) where
zipMatchWith f (Generically1 x) (Generically1 y) = Generically1 <$> genericZipMatchWith f x y
* Generic definition
An instance of Matchable can be implemened through GHC Generics .
As a prerequisite , you need to make your type an instance of ' Functor ' and ' Generic1 ' .
Both of them can be derived using DeriveFunctor and DeriveGeneric extension .
Using ' Generically1 ' and DerivingVia extension , @Matchable@ instance can be automatically derived .
> > > : set -XDeriveFunctor
> > > : set -XDeriveGeneric
> > > : set > > > : {
data MyTree label a = Leaf a | Node label [ MyTree label a ]
deriving stock ( Show , Read , Eq , Ord , Functor , Generic1 )
deriving ( Eq1 , Matchable ) via ( Generically1 ( MyTree label ) )
:}
Alternatively , you can use ' genericZipMatchWith ' to manually define @zipMatchWith@ method .
> instance ( Eq label ) = > Matchable ( MyTree label ) where
> zipMatchWith = genericZipMatchWith
> instance ( Eq label ) = > Eq1 ( MyTree label ) where
> liftEq = liftEqDefault
> > > ( Node " foo " [ Leaf 1 , Leaf 2 ] ) ( Node " foo " [ Leaf ' a ' , Leaf ' b ' ] )
Just ( Node " foo " [ Leaf ( 1,'a'),Leaf ( 2,'b ' ) ] )
> > > ( Node " foo " [ Leaf 1 , Leaf 2 ] ) ( Node " bar " [ Leaf ' a ' , Leaf ' b ' ] )
Nothing
> > > ( Node " foo " [ Leaf 1 ] ) ( Node " foo " [ ] )
Nothing
An instance of Matchable can be implemened through GHC Generics.
As a prerequisite, you need to make your type an instance of 'Functor' and 'Generic1'.
Both of them can be derived using DeriveFunctor and DeriveGeneric extension.
Using 'Generically1' and DerivingVia extension, @Matchable@ instance can be automatically derived.
>>> :set -XDeriveFunctor
>>> :set -XDeriveGeneric
>>> :set -XDerivingVia
>>> :{
data MyTree label a = Leaf a | Node label [MyTree label a]
deriving stock (Show, Read, Eq, Ord, Functor, Generic1)
deriving (Eq1, Matchable) via (Generically1 (MyTree label))
:}
Alternatively, you can use 'genericZipMatchWith' to manually define @zipMatchWith@ method.
> instance (Eq label) => Matchable (MyTree label) where
> zipMatchWith = genericZipMatchWith
> instance (Eq label) => Eq1 (MyTree label) where
> liftEq = liftEqDefault
>>> zipMatch (Node "foo" [Leaf 1, Leaf 2]) (Node "foo" [Leaf 'a', Leaf 'b'])
Just (Node "foo" [Leaf (1,'a'),Leaf (2,'b')])
>>> zipMatch (Node "foo" [Leaf 1, Leaf 2]) (Node "bar" [Leaf 'a', Leaf 'b'])
Nothing
>>> zipMatch (Node "foo" [Leaf 1]) (Node "foo" [])
Nothing
-}
class (Functor t, Eq1 t) => Matchable' t where
zipMatchWith' :: (a -> b -> Maybe c) -> t a -> t b -> Maybe (t c)
| zipMatchWith via Generics .
genericZipMatchWith
:: (Generic1 t, Matchable' (Rep1 t))
=> (a -> b -> Maybe c)
-> t a
-> t b
-> Maybe (t c)
genericZipMatchWith u ta tb = to1 <$> zipMatchWith' u (from1 ta) (from1 tb)
# INLINABLE genericZipMatchWith #
instance Matchable' V1 where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' _ a _ = case a of { }
instance Matchable' U1 where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' _ _ _ = pure U1
instance Matchable' Par1 where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' u (Par1 a) (Par1 b) = Par1 <$> u a b
instance Matchable f => Matchable' (Rec1 f) where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' u (Rec1 fa) (Rec1 fb) = Rec1 <$> zipMatchWith u fa fb
instance (Eq c) => Matchable' (K1 i c) where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' _ (K1 ca) (K1 cb)
= if ca == cb then pure (K1 ca) else empty
instance Matchable' f => Matchable' (M1 i c f) where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' u (M1 fa) (M1 fb) = M1 <$> zipMatchWith' u fa fb
instance (Matchable' f, Matchable' g) => Matchable' (f :+: g) where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' u (L1 fa) (L1 fb) = L1 <$> zipMatchWith' u fa fb
zipMatchWith' u (R1 ga) (R1 gb) = R1 <$> zipMatchWith' u ga gb
zipMatchWith' _ _ _ = empty
instance (Matchable' f, Matchable' g) => Matchable' (f :*: g) where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' u (fa :*: ga) (fb :*: gb) =
liftA2 (:*:) (zipMatchWith' u fa fb) (zipMatchWith' u ga gb)
instance (Matchable f, Matchable' g) => Matchable' (f :.: g) where
{-# INLINABLE zipMatchWith' #-}
zipMatchWith' u (Comp1 fga) (Comp1 fgb) =
Comp1 <$> zipMatchWith (zipMatchWith' u) fga fgb
-- Utility functions
unsafeFillIn :: (Traversable f) => (a -> b -> Maybe c) -> f a -> [b] -> Maybe (f c)
unsafeFillIn u as bs = fst <$> runFillIn (traverse (useOne u) as) bs
-- Just a @StateT [b] Maybe@ but avoids to depend on transformers
newtype FillIn b a = FillIn { runFillIn :: [b] -> Maybe (a, [b]) }
deriving (Functor)
instance Applicative (FillIn b) where
pure a = FillIn $ \bs -> Just (a, bs)
FillIn fx <*> FillIn fy = FillIn $ \bs ->
fx bs >>= \(x, bs') ->
fy bs' >>= \(y, bs'') -> Just (x y, bs'')
useOne :: (a -> b -> Maybe c) -> a -> FillIn b c
useOne u a = FillIn $ \bs -> case bs of
[] -> Nothing
(b:bs') -> u a b >>= \c -> Just (c, bs')
| null | https://raw.githubusercontent.com/viercc/matchable/d43abf37e6658c1690d0ed35b4c83fcd533bf591/src/Data/Matchable.hs | haskell | # LANGUAGE TypeOperators #
# LANGUAGE DeriveFunctor #
* Matchable class
$setup
This is required to silence "type defaults" warning, which clutters GHCi
output and makes doctests fail.
>>> :set -Wno-type-defaults
It is not recommended to implement @fmap@ through this function,
so it is named @fmapRecovered@ but not @fmapDefault@.
| @Matchable t@ implies @Eq a => Eq (t a)@.
| @Matchable t@ implies @Eq1 t@.
---------------------------------------------
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
# INLINABLE zipMatchWith' #
Utility functions
Just a @StateT [b] Maybe@ but avoids to depend on transformers | # LANGUAGE EmptyCase #
# LANGUAGE FlexibleContexts #
# LANGUAGE TypeFamilies #
# LANGUAGE UndecidableInstances #
module Data.Matchable(
Matchable(..),
zipzipMatch,
fmapRecovered,
eqDefault,
liftEqDefault,
* Define Matchable by Generic
Matchable'(), genericZipMatchWith,
) where
import Control.Applicative
import Data.Functor.Classes ( Eq1 )
import Data.Functor.Classes.Orphans ()
import Data.Maybe (fromMaybe, isJust)
import Data.Foldable
import Data.Functor.Identity
import Data.Functor.Compose
import Data.Functor.Product
import Data.Functor.Sum
import Data.Tagged
import Data.Proxy
import Data.List.NonEmpty (NonEmpty)
import Data.Map.Lazy (Map)
import qualified Data.Map.Lazy as Map
import Data.IntMap.Lazy (IntMap)
import qualified Data.IntMap.Lazy as IntMap
import qualified Data.IntMap.Merge.Lazy as IntMap
import Data.Tree (Tree)
import Data.Sequence (Seq)
import qualified Data.Sequence as Seq
import Data.Vector (Vector)
import qualified Data.Vector as Vector
import Data.Hashable (Hashable)
import Data.HashMap.Lazy (HashMap)
import qualified Data.HashMap.Lazy as HashMap
import GHC.Generics
( Generic1(..),
V1,
U1(..),
Par1(Par1),
Rec1(Rec1),
K1(K1),
M1(M1),
type (:+:)(..),
type (:*:)(..),
type (:.:)(Comp1) )
import GHC.Generics.Generically ( Generically1(..) )
| Containers that allows exact structural matching of two containers .
class (Eq1 t, Functor t) => Matchable t where
|
Decides if two structures match exactly . If they match , return zipped version of them .
> ta tb = Just tab
holds if and only if both of
> ta = fmap fst tab
> tb = fmap snd tab
holds . Otherwise , @zipMatch ta tb = Nothing@.
For example , the type signature of @zipMatch@ on the list reads as follows :
> : : [ a ] - > [ b ] - > Maybe [ ( a , b ) ]
@zipMatch as bs@ returns @Just ( zip as bs)@ if the lengths of two given lists are
same , and returns @Nothing@ otherwise .
= = = = Example
> > > [ 1 , 2 , 3 ] [ ' a ' , ' b ' , ' c ' ]
Just [ ( 1,'a'),(2,'b'),(3,'c ' ) ]
> > > [ 1 , 2 , 3 ] [ ' a ' , ' b ' ]
Nothing
Decides if two structures match exactly. If they match, return zipped version of them.
> zipMatch ta tb = Just tab
holds if and only if both of
> ta = fmap fst tab
> tb = fmap snd tab
holds. Otherwise, @zipMatch ta tb = Nothing@.
For example, the type signature of @zipMatch@ on the list Functor @[]@ reads as follows:
> zipMatch :: [a] -> [b] -> Maybe [(a,b)]
@zipMatch as bs@ returns @Just (zip as bs)@ if the lengths of two given lists are
same, and returns @Nothing@ otherwise.
==== Example
>>> zipMatch [1, 2, 3] ['a', 'b', 'c']
Just [(1,'a'),(2,'b'),(3,'c')]
>>> zipMatch [1, 2, 3] ['a', 'b']
Nothing
-}
zipMatch :: t a -> t b -> Maybe (t (a,b))
zipMatch = zipMatchWith (curry Just)
|
Match two structures . If they match , zip them with given function
@(a - > b - > Maybe c)@. Passed function can make whole match fail
by returning @Nothing@.
A definition of ' zipMatchWith ' must satisfy :
* If there is a pair @(tab , tc)@ such that fulfills all following three conditions ,
then @zipMatchWith f ta tb = Just tc@.
1 . @ta = fmap fst tab@
2 . @tb = fmap snd tab@
3 . ( uncurry f ) tab = fmap Just tc@
* If there are no such pair , @zipMatchWith f ta tb = Nothing@.
If @t@ is also ' ' , the last condition can be dropped and
the equation can be stated without using @tc@.
> zipMatchWith f ta tb = traverse ( uncurry f ) tab
@zipMatch@ can be defined in terms of @zipMatchWith@.
And if @t@ is also @Traversable@ , @zipMatchWith@ can be defined in terms of @zipMatch@.
When you implement both of them by hand , keep their relation in the way
the default implementation is .
> = zipMatchWith ( curry pure )
> zipMatchWith f ta tb = zipMatch ta tb > > = traverse ( uncurry f )
Match two structures. If they match, zip them with given function
@(a -> b -> Maybe c)@. Passed function can make whole match fail
by returning @Nothing@.
A definition of 'zipMatchWith' must satisfy:
* If there is a pair @(tab, tc)@ such that fulfills all following three conditions,
then @zipMatchWith f ta tb = Just tc@.
1. @ta = fmap fst tab@
2. @tb = fmap snd tab@
3. @fmap (uncurry f) tab = fmap Just tc@
* If there are no such pair, @zipMatchWith f ta tb = Nothing@.
If @t@ is also 'Traversable', the last condition can be dropped and
the equation can be stated without using @tc@.
> zipMatchWith f ta tb = traverse (uncurry f) tab
@zipMatch@ can be defined in terms of @zipMatchWith@.
And if @t@ is also @Traversable@, @zipMatchWith@ can be defined in terms of @zipMatch@.
When you implement both of them by hand, keep their relation in the way
the default implementation is.
> zipMatch = zipMatchWith (curry pure)
> zipMatchWith f ta tb = zipMatch ta tb >>= traverse (uncurry f)
-}
zipMatchWith :: (a -> b -> Maybe c) -> t a -> t b -> Maybe (t c)
# MINIMAL zipMatchWith #
| > zipzipMatch = zipMatchWith
zipzipMatch
:: (Matchable t, Matchable u)
=> t (u a)
-> t (u b)
-> Maybe (t (u (a, b)))
zipzipMatch = zipMatchWith zipMatch
| @Matchable t@ implies @Functor t@.
fmapRecovered :: (Matchable t) => (a -> b) -> t a -> t b
fmapRecovered f ta =
fromMaybe (error "Law-violating Matchable instance") $
zipMatchWith (\a _ -> Just (f a)) ta ta
eqDefault :: (Matchable t, Eq a) => t a -> t a -> Bool
eqDefault = liftEqDefault (==)
liftEqDefault :: (Matchable t) => (a -> b -> Bool) -> t a -> t b -> Bool
liftEqDefault eq tx ty =
let u x y = if x `eq` y then Just () else Nothing
in isJust $ zipMatchWith u tx ty
instance Matchable Identity where
zipMatchWith = genericZipMatchWith
instance (Eq k) => Matchable (Const k) where
zipMatchWith = genericZipMatchWith
instance (Matchable f, Matchable g) => Matchable (Product f g) where
zipMatchWith = genericZipMatchWith
instance (Matchable f, Matchable g) => Matchable (Sum f g) where
zipMatchWith = genericZipMatchWith
instance (Matchable f, Matchable g) => Matchable (Compose f g) where
zipMatchWith = genericZipMatchWith
instance Matchable Proxy where
zipMatchWith _ _ _ = Just Proxy
instance Matchable (Tagged t) where
zipMatchWith = genericZipMatchWith
instance Matchable Maybe where
zipMatchWith = genericZipMatchWith
instance Matchable [] where
zipMatchWith = genericZipMatchWith
instance Matchable NonEmpty where
zipMatchWith = genericZipMatchWith
instance (Eq e) => Matchable ((,) e) where
zipMatchWith = genericZipMatchWith
instance (Eq e) => Matchable (Either e) where
zipMatchWith = genericZipMatchWith
-- Sequences match only when they have equal length; pairing is positional.
instance Matchable Seq where
  zipMatch as bs
    | Seq.length as == Seq.length bs = Just (Seq.zip as bs)
    | otherwise = Nothing
  zipMatchWith u as bs
    -- 'unsafeFillIn' consumes the second Seq as a list; the length check
    -- above guarantees it is long enough.
    | Seq.length as == Seq.length bs = unsafeFillIn u as (Data.Foldable.toList bs)
    | otherwise = Nothing
-- Maps match only when their key sets coincide; values are zipped per key.
instance (Eq k) => Matchable (Map k) where
  zipMatchWith u as bs
    -- Equal size plus pairwise matching of the ascending (key, value) lists
    -- implies identical key sets, so fromDistinctAscList is safe here.
    | Map.size as == Map.size bs =
      Map.fromDistinctAscList <$>
      zipMatchWith (zipMatchWith u) (Map.toAscList as) (Map.toAscList bs)
    | otherwise = Nothing
-- IntMaps match only when their key sets coincide; any key missing from
-- either side aborts the merge via the failing 'miss' tactic.
instance Matchable IntMap where
  zipMatchWith u as bs
    | IntMap.size as == IntMap.size bs = merger as bs
    | otherwise = Nothing
    where
      miss = IntMap.traverseMissing (\_ _ -> Nothing)
      merger = IntMap.mergeA miss miss (IntMap.zipWithAMatched (const u))
-- Rose trees: derived structurally from the Generic1 representation.
instance Matchable Tree where
  zipMatchWith = genericZipMatchWith
-- Vectors match only when they have equal length; pairing is positional.
instance Matchable Vector where
  zipMatch as bs
    | Vector.length as == Vector.length bs = Just (Vector.zip as bs)
    | otherwise = Nothing
  zipMatchWith u as bs
    | Vector.length as == Vector.length bs = Vector.zipWithM u as bs
    | otherwise = Nothing
-- HashMaps match only when their key sets coincide: with equal sizes, every
-- key of 'as' must also be present in 'bs', otherwise the lookup fails.
instance (Eq k, Hashable k) => Matchable (HashMap k) where
  zipMatch as bs
    | HashMap.size as == HashMap.size bs =
      HashMap.traverseWithKey (\k a -> (,) a <$> HashMap.lookup k bs) as
    | otherwise = Nothing
  zipMatchWith u as bs
    | HashMap.size as == HashMap.size bs =
      HashMap.traverseWithKey (\k a -> u a =<< HashMap.lookup k bs) as
    | otherwise = Nothing
instance (Generic1 f, Matchable' (Rep1 f)) => Matchable (Generically1 f) where
zipMatchWith f (Generically1 x) (Generically1 y) = Generically1 <$> genericZipMatchWith f x y
* Generic definition
An instance of Matchable can be implemened through GHC Generics .
As a prerequisite , you need to make your type an instance of ' Functor ' and ' Generic1 ' .
Both of them can be derived using DeriveFunctor and DeriveGeneric extension .
Using ' Generically1 ' and DerivingVia extension , @Matchable@ instance can be automatically derived .
> > > : set -XDeriveFunctor
> > > : set -XDeriveGeneric
> > > : set > > > : {
data MyTree label a = Leaf a | Node label [ MyTree label a ]
deriving stock ( Show , Read , Eq , Ord , Functor , Generic1 )
deriving ( Eq1 , Matchable ) via ( Generically1 ( MyTree label ) )
:}
Alternatively , you can use ' genericZipMatchWith ' to manually define @zipMatchWith@ method .
> instance ( Eq label ) = > Matchable ( MyTree label ) where
> zipMatchWith = genericZipMatchWith
> instance ( Eq label ) = > Eq1 ( MyTree label ) where
> liftEq = liftEqDefault
> > > ( Node " foo " [ Leaf 1 , Leaf 2 ] ) ( Node " foo " [ Leaf ' a ' , Leaf ' b ' ] )
Just ( Node " foo " [ Leaf ( 1,'a'),Leaf ( 2,'b ' ) ] )
> > > ( Node " foo " [ Leaf 1 , Leaf 2 ] ) ( Node " bar " [ Leaf ' a ' , Leaf ' b ' ] )
Nothing
> > > ( Node " foo " [ Leaf 1 ] ) ( Node " foo " [ ] )
Nothing
An instance of Matchable can be implemened through GHC Generics.
As a prerequisite, you need to make your type an instance of 'Functor' and 'Generic1'.
Both of them can be derived using DeriveFunctor and DeriveGeneric extension.
Using 'Generically1' and DerivingVia extension, @Matchable@ instance can be automatically derived.
>>> :set -XDeriveFunctor
>>> :set -XDeriveGeneric
>>> :set -XDerivingVia
>>> :{
data MyTree label a = Leaf a | Node label [MyTree label a]
deriving stock (Show, Read, Eq, Ord, Functor, Generic1)
deriving (Eq1, Matchable) via (Generically1 (MyTree label))
:}
Alternatively, you can use 'genericZipMatchWith' to manually define @zipMatchWith@ method.
> instance (Eq label) => Matchable (MyTree label) where
> zipMatchWith = genericZipMatchWith
> instance (Eq label) => Eq1 (MyTree label) where
> liftEq = liftEqDefault
>>> zipMatch (Node "foo" [Leaf 1, Leaf 2]) (Node "foo" [Leaf 'a', Leaf 'b'])
Just (Node "foo" [Leaf (1,'a'),Leaf (2,'b')])
>>> zipMatch (Node "foo" [Leaf 1, Leaf 2]) (Node "bar" [Leaf 'a', Leaf 'b'])
Nothing
>>> zipMatch (Node "foo" [Leaf 1]) (Node "foo" [])
Nothing
-}
class (Functor t, Eq1 t) => Matchable' t where
zipMatchWith' :: (a -> b -> Maybe c) -> t a -> t b -> Maybe (t c)
| zipMatchWith via Generics .
genericZipMatchWith
:: (Generic1 t, Matchable' (Rep1 t))
=> (a -> b -> Maybe c)
-> t a
-> t b
-> Maybe (t c)
genericZipMatchWith u ta tb = to1 <$> zipMatchWith' u (from1 ta) (from1 tb)
# INLINABLE genericZipMatchWith #
-- Empty type: uninhabited, so the match is vacuous.
instance Matchable' V1 where
  zipMatchWith' _ a _ = case a of { }
-- Unit: nothing to compare; always matches.
instance Matchable' U1 where
  zipMatchWith' _ _ _ = pure U1
-- Occurrence of the type parameter: combine the payloads with @u@.
instance Matchable' Par1 where
  zipMatchWith' u (Par1 a) (Par1 b) = Par1 <$> u a b
-- Recursive occurrence of another Matchable container.
instance Matchable f => Matchable' (Rec1 f) where
  zipMatchWith' u (Rec1 fa) (Rec1 fb) = Rec1 <$> zipMatchWith u fa fb
-- Constant field: both sides must be equal for the structures to match.
instance (Eq c) => Matchable' (K1 i c) where
  zipMatchWith' _ (K1 ca) (K1 cb)
    = if ca == cb then pure (K1 ca) else empty
-- Metadata wrapper: transparent.
instance Matchable' f => Matchable' (M1 i c f) where
  zipMatchWith' u (M1 fa) (M1 fb) = M1 <$> zipMatchWith' u fa fb
-- Sums: only values built from the same constructor can match.
instance (Matchable' f, Matchable' g) => Matchable' (f :+: g) where
  zipMatchWith' u (L1 fa) (L1 fb) = L1 <$> zipMatchWith' u fa fb
  zipMatchWith' u (R1 ga) (R1 gb) = R1 <$> zipMatchWith' u ga gb
  zipMatchWith' _ _ _ = empty
-- Products: both components must match.
instance (Matchable' f, Matchable' g) => Matchable' (f :*: g) where
  zipMatchWith' u (fa :*: ga) (fb :*: gb) =
    liftA2 (:*:) (zipMatchWith' u fa fb) (zipMatchWith' u ga gb)
-- Composition: match the outer layer, matching inner layers element-wise.
instance (Matchable f, Matchable' g) => Matchable' (f :.: g) where
  zipMatchWith' u (Comp1 fga) (Comp1 fgb) =
    Comp1 <$> zipMatchWith (zipMatchWith' u) fga fgb
-- Zip a Traversable with a list using a partial combiner. Precondition
-- (hence "unsafe"): the list has at least as many elements as the
-- container; callers are expected to have checked lengths beforehand.
unsafeFillIn :: (Traversable f) => (a -> b -> Maybe c) -> f a -> [b] -> Maybe (f c)
unsafeFillIn u as bs = fst <$> runFillIn (traverse (useOne u) as) bs
-- A state-plus-failure applicative: threads the remaining list of @b@s
-- while allowing the whole computation to abort with 'Nothing'.
newtype FillIn b a = FillIn { runFillIn :: [b] -> Maybe (a, [b]) }
  deriving (Functor)
instance Applicative (FillIn b) where
  pure a = FillIn $ \bs -> Just (a, bs)
  -- Run the function side first, then the argument on the leftover input.
  FillIn fx <*> FillIn fy = FillIn $ \bs ->
    fx bs >>= \(x, bs') ->
    fy bs' >>= \(y, bs'') -> Just (x y, bs'')
-- Consume one list element and combine it with @a@; fails when the list is
-- exhausted or when the combiner rejects the pair.
useOne :: (a -> b -> Maybe c) -> a -> FillIn b c
useOne u a = FillIn $ \bs -> case bs of
  [] -> Nothing
  (b:bs') -> u a b >>= \c -> Just (c, bs')
|
1ba2e8944862e3cddfd83a7b3c05830675c0ab7be118c0666806e42ce558d50d | shuieryin/wechat_mud | scene_root_sup.erl | %%%-------------------------------------------------------------------
@author shuieryin
( C ) 2016 , Shuieryin
%%% @doc
%%%
%%% @end
Created : 03 . Mar 2016 9:17 PM
%%%-------------------------------------------------------------------
-module(scene_root_sup).
-author("shuieryin").
-behaviour(supervisor).
%% API
-export([
start_link/0
]).
%% Supervisor callbacks
-export([init/1]).
-define(SERVER, ?MODULE).
%%%===================================================================
%%% API functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Starts the supervisor
%%
%% @end
%%--------------------------------------------------------------------
-spec start_link() -> supervisor:startlink_ret().
start_link() ->
    %% Register locally as ?SERVER (= ?MODULE); the supervision tree
    %% itself is defined in init/1.
    supervisor:start_link({local, ?SERVER}, ?MODULE, []).
%%%===================================================================
%%% Supervisor callbacks
%%%===================================================================
%%--------------------------------------------------------------------
@private
%% @doc
%% Whenever a supervisor is started using supervisor:start_link/[2,3],
%% this function is called by the new process to find out about
%% restart strategy, maximum restart frequency and child
%% specifications.
%%
%% @end
%%--------------------------------------------------------------------
-spec init(Args :: term()) ->
    {ok, {SupFlags :: supervisor:sup_flags(), [ChildSpec :: supervisor:child_spec()]}} | ignore.
init([]) ->
    %% one_for_one: children restart independently, at most 1000 restarts
    %% within any 3600-second window before the supervisor gives up.
    SupFlags = {one_for_one, 1000, 3600},
    %% Worker process managing global scene bookkeeping.
    SceneManager =
        {scene_manager,
            {scene_manager, start_link, []},
            permanent,
            10000,
            worker,
            [scene_manager]},
    %% Sub-supervisor for the individual scene state machines.
    SceneStatemSup =
        {scene_statem_sup,
            {scene_statem_sup, start_link, []},
            permanent,
            10000,
            supervisor,
            [scene_statem_sup]},
    {ok, {SupFlags, [SceneManager, SceneStatemSup]}}.
%%%===================================================================
%%% Internal functions (N/A)
%%%=================================================================== | null | https://raw.githubusercontent.com/shuieryin/wechat_mud/b2a9251a9b208fee5cd8c4213759750b95c8b8aa/src/scene/scene_root_sup.erl | erlang | -------------------------------------------------------------------
@doc
@end
-------------------------------------------------------------------
API
Supervisor callbacks
===================================================================
API functions
===================================================================
--------------------------------------------------------------------
@doc
Starts the supervisor
@end
--------------------------------------------------------------------
===================================================================
Supervisor callbacks
===================================================================
--------------------------------------------------------------------
@doc
Whenever a supervisor is started using supervisor:start_link/[2,3],
this function is called by the new process to find out about
restart strategy, maximum restart frequency and child
specifications.
@end
--------------------------------------------------------------------
===================================================================
Internal functions (N/A)
=================================================================== | @author shuieryin
( C ) 2016 , Shuieryin
Created : 03 . Mar 2016 9:17 PM
-module(scene_root_sup).
-author("shuieryin").
-behaviour(supervisor).
-export([
start_link/0
]).
-export([init/1]).
-define(SERVER, ?MODULE).
-spec start_link() -> supervisor:startlink_ret().
start_link() ->
supervisor:start_link({local, ?SERVER}, ?MODULE, []).
@private
-spec init(Args :: term()) ->
{ok, {SupFlags :: supervisor:sup_flags(), [ChildSpec :: supervisor:child_spec()]}} | ignore.
init([]) ->
RestartStrategy = one_for_one,
MaxRestarts = 1000,
MaxSecondsBetweenRestarts = 3600,
SupFlags = {RestartStrategy, MaxRestarts, MaxSecondsBetweenRestarts},
{ok, {SupFlags, [
{scene_manager,
{scene_manager, start_link, []},
permanent,
10000,
worker,
[scene_manager]
},
{scene_statem_sup,
{scene_statem_sup, start_link, []},
permanent,
10000,
supervisor,
[scene_statem_sup]
}
]}}.
|
7c9a83903b1b285508b2d505d19c62ee924492b123081587f99d0d8ed92bc9cf | cpichard/filesequence | Hash.hs | -- |Function to compute the hash of all files in a sequence
module System.FileSequence.Hash where
import qualified Data.ByteString as BS
import System.FileSequence
import System.FileSequence.Internal
import System.IO
import Crypto.Hash
import Control.Monad
-- |First naive implementation of producing a hash from a sequence of files
-- | Compute a SHA-224 digest over the concatenated bytes of every file in
-- the sequence, folded in frame order, so the digest identifies both the
-- file contents and their ordering. Files are streamed in fixed-size
-- chunks to keep memory use bounded.
fileSequenceSum :: FileSequence -> IO String
fileSequenceSum fs = do
    digest <- foldM processFile (hashInit :: Context SHA224) (map pathToString (frameList fs))
    let hend = hashFinalize digest
    return $ show hend
    where processFile mdc f = withFile f ReadMode (`hashFile` mdc)
          -- Strictly fold a file's chunks into the running hash context.
          hashFile h m = do
            eof <- hIsEOF h
            if eof
              then return m
              else do
                -- 32 KiB reads amortize per-call overhead compared to the
                -- previous 512-byte chunks; incremental hashing yields the
                -- same digest regardless of chunk size.
                chunk <- BS.hGet h 32768
                let mdc = hashUpdate m chunk
                mdc `seq` hashFile h mdc
| null | https://raw.githubusercontent.com/cpichard/filesequence/39cd8eb7dd0bc494c181c5b04fc9ff2fae5202d0/src/System/FileSequence/Hash.hs | haskell | |Function to compute the hash of all files in a sequence
|First naive implementation of producing a hash from a sequence of files |
module System.FileSequence.Hash where
import qualified Data.ByteString as BS
import System.FileSequence
import System.FileSequence.Internal
import System.IO
import Crypto.Hash
import Control.Monad
fileSequenceSum :: FileSequence -> IO String
fileSequenceSum fs = do
digest <- foldM processFile (hashInit :: Context SHA224) (map pathToString (frameList fs))
let hend = hashFinalize digest
return $ show hend
where processFile mdc f = withFile f ReadMode (`hashFile` mdc)
hashFile h m = do
eof <- hIsEOF h
if eof
then return m
else do
chunk <- BS.hGet h 512
let mdc = hashUpdate m chunk
mdc `seq` hashFile h mdc
|
3bc0539c8a22231585e6b0bfd16cc6503abda10ae1104b2b252733409f73e9ae | a9032676/Codewars-Haskell | Imperative.hs | # LANGUAGE DeriveFunctor #
# LANGUAGE TupleSections #
module Imperative (def, var, lit, while, (+=), (-=), (*=)) where
import Control.Monad.State (State, state, put, get, modify, runState, execState)
import Control.Lens(ix, (%~))
-- | A free monad over the functor @f@: either a finished computation
-- ('Pure') or one more layer of instructions ('Impure').
data Free f a = Pure a | Impure (f (Free f a))
instance Functor f => Functor (Free f) where
  -- Map over the final result, pushing the function under every layer.
  fmap f (Pure a) = Pure $ f a
  fmap f (Impure r) = Impure $ fmap f <$> r
instance Functor f => Applicative (Free f) where
  pure = Pure
  -- Peel instruction layers off the function side until a 'Pure' function
  -- can be applied to the argument computation.
  Pure f <*> m = fmap f m
  Impure f <*> a = Impure $ fmap (<*> a) f
instance Functor f => Monad (Free f) where
  return = pure
  -- Bind substitutes the continuation at every 'Pure' leaf.
  Pure a >>= f = f a
  Impure m >>= f = Impure $ (>>= f) <$> m
-- | Lift a single instruction into the free monad by finishing each of
-- its result positions with 'Pure'.
liftF :: Functor f => f a -> Free f a
liftF = Impure . fmap Pure
-- | A value source: a reference into the variable store ('Ref') or an
-- immediate literal ('Lit').
data Var s a = Ref s | Lit a
-- | One step of an imperative program, continuation-style: allocate a
-- variable, destructively combine two values, or loop while a predicate
-- holds.
data Calc s a r
  = CVar a (Var s a -> r)
  | CModify (a -> a -> a) (Var s a) (Var s a) r
  | CWhile (Var s a) (a -> Bool) (FCalc s a ()) r
  deriving Functor
-- | Imperative programs are free monads over 'Calc'.
type FCalc s a = Free (Calc s a)
-- | Allocate a new mutable variable initialised to @a@.
var :: a -> FCalc s a (Var s a)
var a = liftF $ CVar a id
-- | Wrap a literal value; literals cannot be the target of an assignment.
lit :: a -> Var s a
lit = Lit
-- Internal: emit an in-place update @v := f v v'@.
modifyC :: (a -> a -> a) -> Var s a -> Var s a -> FCalc s a ()
modifyC f v v' = liftF $ CModify f v v' ()
-- | Compound assignment operators, e.g. @v += lit 1@.
(+=), (-=), (*=) :: Num a => Var s a -> Var s a -> FCalc s a ()
(+=) = modifyC (+)
(*=) = modifyC (*)
(-=) = modifyC (-)
-- | Run the body repeatedly while the predicate holds for the variable.
while :: Var s a -> (a -> Bool) -> FCalc s a () -> FCalc s a ()
while v p c = liftF $ CWhile v p c ()
return : : a = > Var s a - > s a a
-- return v = liftF $ CReturn v id
-- | Interpret a program against an initially empty variable store and
-- resolve its result: a literal is returned directly, a reference is
-- looked up in the final store.
def :: FCalc Int a (Var Int a) -> a
def free = case runState (runCalc free) [] of
  (Lit a, _) -> a
  (Ref i, l) -> l !! i
-- Interpret one program in the 'State' monad; the state is the variable
-- store, a list indexed by allocation order.
runCalc :: FCalc Int a r -> State [a] r
runCalc (Pure a ) = state (a, )
-- Allocation: append the initial value, hand back a 'Ref' to the new slot.
runCalc (Impure (CVar a f )) = get >>= \l -> put (l ++ [a]) >> runCalc (f . Ref $ length l)
runCalc (Impure (CModify _ (Lit _) _ _)) = error "Cannot modify literal"
-- In-place update at slot i; the operand is a literal or read from slot j.
runCalc (Impure (CModify f (Ref i) (Lit v) r)) = get >>= \l -> put (ix i %~ flip f v $ l) >> runCalc r
runCalc (Impure (CModify f (Ref i) (Ref j) r)) = get >>= \l -> put (ix i %~ flip f (l !! j) $ l) >> runCalc r
-- Loops are delegated to 'runWhile', which returns the final store.
runCalc (Impure (CWhile v p c r )) = get >>= \l -> put (runWhile l v p c) >> runCalc r
-- | Repeatedly run the loop body while the predicate holds for the value
-- referenced by the loop variable, threading the variable store through
-- each iteration. Only references can be looped on: a literal condition
-- could never change, so it is rejected with an explicit error instead of
-- the previous opaque pattern-match failure.
runWhile :: [a] -> Var Int v -> (a -> Bool) -> FCalc Int a () -> [a]
runWhile _ (Lit _) _ _ = error "Cannot loop on a literal"
runWhile l r@(Ref i) p c =
  if p (l !! i)
    then runWhile (execState (runCalc c) l) r p c
    else l
-- factorial :: Integer -> Integer
-- factorial n = def $ do
result < - var 1
-- i <- var n
-- while i (>0) $ do
-- result *= i
i -= lit 1
-- return result | null | https://raw.githubusercontent.com/a9032676/Codewars-Haskell/6eccd81e9b6ae31b5c0a28ecc16b933f3abae1a5/src/Imperative.hs | haskell | return v = liftF $ CReturn v id
factorial :: Integer -> Integer
factorial n = def $ do
i <- var n
while i (>0) $ do
result *= i
return result | # LANGUAGE DeriveFunctor #
# LANGUAGE TupleSections #
module Imperative (def, var, lit, while, (+=), (-=), (*=)) where
import Control.Monad.State (State, state, put, get, modify, runState, execState)
import Control.Lens(ix, (%~))
data Free f a = Pure a | Impure (f (Free f a))
instance Functor f => Functor (Free f) where
fmap f (Pure a) = Pure $ f a
fmap f (Impure r) = Impure $ fmap f <$> r
instance Functor f => Applicative (Free f) where
pure = Pure
Pure f <*> m = fmap f m
Impure f <*> a = Impure $ fmap (<*> a) f
instance Functor f => Monad (Free f) where
return = pure
Pure a >>= f = f a
Impure m >>= f = Impure $ (>>= f) <$> m
liftF :: Functor f => f a -> Free f a
liftF f = Impure $ fmap Pure f
data Var s a = Ref s | Lit a
data Calc s a r
= CVar a (Var s a -> r)
| CModify (a -> a -> a) (Var s a) (Var s a) r
| CWhile (Var s a) (a -> Bool) (FCalc s a ()) r
deriving Functor
type FCalc s a = Free (Calc s a)
var :: a -> FCalc s a (Var s a)
var a = liftF $ CVar a id
lit :: a -> Var s a
lit = Lit
modifyC :: (a -> a -> a) -> Var s a -> Var s a -> FCalc s a ()
modifyC f v v' = liftF $ CModify f v v' ()
(+=), (-=), (*=) :: Num a => Var s a -> Var s a -> FCalc s a ()
(+=) = modifyC (+)
(*=) = modifyC (*)
(-=) = modifyC (-)
while :: Var s a -> (a -> Bool) -> FCalc s a () -> FCalc s a ()
while v p c = liftF $ CWhile v p c ()
return : : a = > Var s a - > s a a
def :: FCalc Int a (Var Int a) -> a
def free = case runState (runCalc free) [] of
(Lit a, _) -> a
(Ref i, l) -> l !! i
runCalc :: FCalc Int a r -> State [a] r
runCalc (Pure a ) = state (a, )
runCalc (Impure (CVar a f )) = get >>= \l -> put (l ++ [a]) >> runCalc (f . Ref $ length l)
runCalc (Impure (CModify _ (Lit _) _ _)) = error "Cannot modify literal"
runCalc (Impure (CModify f (Ref i) (Lit v) r)) = get >>= \l -> put (ix i %~ flip f v $ l) >> runCalc r
runCalc (Impure (CModify f (Ref i) (Ref j) r)) = get >>= \l -> put (ix i %~ flip f (l !! j) $ l) >> runCalc r
runCalc (Impure (CWhile v p c r )) = get >>= \l -> put (runWhile l v p c) >> runCalc r
runWhile :: [a] -> Var Int v -> (a -> Bool) -> FCalc Int a () -> [a]
runWhile l r@(Ref i) p c =
if p (l !! i)
then runWhile (execState (runCalc c) l) r p c
else l
result < - var 1
i -= lit 1 |
4abcc36332b80d80f6b0f17c581d96c7914fb9e78cc0e42c20e610f6c9da2ae4 | databrary/databrary | Periodic.hs | # LANGUAGE TupleSections , , ScopedTypeVariables #
module Service.Periodic
( forkPeriodic
) where
import Control.Concurrent (ThreadId, forkFinally, threadDelay)
import Control.Exception (handle, mask)
import Control.Monad (void, when)
import Control.Monad.Trans.Reader (withReaderT)
import Data.Fixed (Fixed(..), Micro)
import Data.IORef (writeIORef)
import Data.Time.Calendar.OrdinalDate (sundayStartWeek)
import Data.Time.Clock (UTCTime(..), diffUTCTime, getCurrentTime)
import Data.Time.LocalTime (TimeOfDay(TimeOfDay), timeOfDayToTime)
import Has
import Service.Types
import Service.Log
import Service.Notification
import Context
import Model.Periodic
import Model.Token
import Model.Volume
import Model.Stats
import Model.Notification
import Controller.Notification
import Solr.Index
TODO
-- Sleep for a 'Micro' duration that may exceed @maxBound :: Int@
-- microseconds (which a single 'threadDelay' call cannot represent):
-- sleep in maximal chunks, then once for the remainder.
threadDelay' :: Micro -> IO ()
threadDelay' (MkFixed t)
  | t > m' = threadDelay m >> threadDelay' (MkFixed (t - m'))
  | otherwise = threadDelay (fromInteger t)
  where
    m' = toInteger m
    m = maxBound
-- Execute one periodic maintenance pass in a background context: clean
-- expired tokens, refresh the volume and Solr indexes, refresh the cached
-- site stats, sync EZID (weekly only), prune and regenerate
-- notifications, and finally trigger notification delivery for this
-- period.
run :: Period -> Service -> IO ()
run p = runContextM $ withReaderT BackgroundContext $ do
  t <- peek
  focusIO $ logMsg t ("periodic running: " ++ show p)
  cleanTokens
  updateVolumeIndex
  withReaderT (mkSolrIndexingContext . backgroundContext) updateIndex
  ss <- lookupSiteStats
  -- Publish the freshly computed stats into the service-wide IORef.
  focusIO $ (`writeIORef` ss) . serviceStats
  -- EZID sync only runs for the weekly (or rarer) period.
  when (p >= PeriodWeekly) $
    void updateEZID
  _ <- cleanNotifications
  updateStateNotifications
  focusIO $ triggerNotifications (Just p)
-- Loop forever, waking at 07:00 UTC each day (the first wake-up is the
-- first 07:00 at or after service start) and running the periodic job;
-- a Sunday wake-up runs the weekly period, otherwise the daily one.
-- The caller runs us masked and passes 'unmask' so that only the sleep
-- and the job itself are interruptible: throwing a 'Period' during the
-- sleep forces an immediate run of that period without advancing the
-- schedule, and a 'Period' thrown during the job merely logs
-- "periodic interrupted" and continues with the next wake-up.
runPeriodic :: Service -> (forall a . IO a -> IO a) -> IO ()
runPeriodic rc unmask = loop (if s <= st then d s else s) where
  st = serviceStartTime rc
  s = st{ utctDayTime = timeOfDayToTime $ TimeOfDay 7 0 0 }
  -- Advance a wake-up time by one day.
  d t = t{ utctDay = succ (utctDay t) }
  loop t = do
    n <- getCurrentTime
    (t', p) <- handle (return . (t ,)) $ do
      unmask $ threadDelay' $ realToFrac $ diffUTCTime t n
      return (d t, if 0 == snd (sundayStartWeek (utctDay t))
        then PeriodWeekly
        else PeriodDaily)
    handle (\(_ :: Period) -> logMsg t "periodic interrupted" (view rc)) $
      unmask $ run p rc
    loop t'
-- | Fork the periodic maintenance thread. The thread body runs masked
-- (with 'runPeriodic' unmasking only its interruptible sections), and any
-- exit — normal or exceptional — is logged with its reason.
forkPeriodic :: Service -> IO ThreadId
forkPeriodic rc = forkFinally (mask $ runPeriodic rc) $ \r -> do
  t <- getCurrentTime
  logMsg t ("periodic aborted: " ++ show r) (view rc)
| null | https://raw.githubusercontent.com/databrary/databrary/685f3c625b960268f5d9b04e3d7c6146bea5afda/src/Service/Periodic.hs | haskell | # LANGUAGE TupleSections , , ScopedTypeVariables #
module Service.Periodic
( forkPeriodic
) where
import Control.Concurrent (ThreadId, forkFinally, threadDelay)
import Control.Exception (handle, mask)
import Control.Monad (void, when)
import Control.Monad.Trans.Reader (withReaderT)
import Data.Fixed (Fixed(..), Micro)
import Data.IORef (writeIORef)
import Data.Time.Calendar.OrdinalDate (sundayStartWeek)
import Data.Time.Clock (UTCTime(..), diffUTCTime, getCurrentTime)
import Data.Time.LocalTime (TimeOfDay(TimeOfDay), timeOfDayToTime)
import Has
import Service.Types
import Service.Log
import Service.Notification
import Context
import Model.Periodic
import Model.Token
import Model.Volume
import Model.Stats
import Model.Notification
import Controller.Notification
import Solr.Index
TODO
threadDelay' :: Micro -> IO ()
threadDelay' (MkFixed t)
| t > m' = threadDelay m >> threadDelay' (MkFixed (t - m'))
| otherwise = threadDelay (fromInteger t)
where
m' = toInteger m
m = maxBound
run :: Period -> Service -> IO ()
run p = runContextM $ withReaderT BackgroundContext $ do
t <- peek
focusIO $ logMsg t ("periodic running: " ++ show p)
cleanTokens
updateVolumeIndex
withReaderT (mkSolrIndexingContext . backgroundContext) updateIndex
ss <- lookupSiteStats
focusIO $ (`writeIORef` ss) . serviceStats
when (p >= PeriodWeekly) $
void updateEZID
_ <- cleanNotifications
updateStateNotifications
focusIO $ triggerNotifications (Just p)
runPeriodic :: Service -> (forall a . IO a -> IO a) -> IO ()
runPeriodic rc unmask = loop (if s <= st then d s else s) where
st = serviceStartTime rc
s = st{ utctDayTime = timeOfDayToTime $ TimeOfDay 7 0 0 }
d t = t{ utctDay = succ (utctDay t) }
loop t = do
n <- getCurrentTime
(t', p) <- handle (return . (t ,)) $ do
unmask $ threadDelay' $ realToFrac $ diffUTCTime t n
return (d t, if 0 == snd (sundayStartWeek (utctDay t))
then PeriodWeekly
else PeriodDaily)
handle (\(_ :: Period) -> logMsg t "periodic interrupted" (view rc)) $
unmask $ run p rc
loop t'
forkPeriodic :: Service -> IO ThreadId
forkPeriodic rc = forkFinally (mask $ runPeriodic rc) $ \r -> do
t <- getCurrentTime
logMsg t ("periodic aborted: " ++ show r) (view rc)
| |
0860d096ad53eeac5e0857246931c366375c8abc319731f94a76202409d491e8 | anuragsoni/postgres-protocol | test_driver_lwt.ml | open Lwt.Syntax
(* Encode each id as a 4-byte big-endian int32 and tag it as a binary
   query parameter, preserving the order of [ids]. *)
let make_parameters ids =
  let encode id =
    let buf = Bytes.create 4 in
    Caml.Bytes.set_int32_be buf 0 id;
    `Binary, Some (Bytes.to_string buf)
  in
  Array.of_list (List.map encode ids)
;;
(* Prepare a named server-side statement selecting three users by id.
   Naming it lets later [execute] calls reference it without re-preparing. *)
let prepare_query name conn =
  Postgres_lwt.prepare
    ~name
    ~statement:"SELECT id, email from users where id IN ($1, $2, $3)"
    conn
;;
(* Reset and seed the demo schema: drop any existing [users] table,
   recreate it, and insert ten users with randomized email domains. Each
   statement is prepared (unnamed) and then executed, discarding any rows. *)
let setup conn =
  let open Lwt_result.Infix in
  let drop_users = "DROP TABLE IF EXISTS users;" in
  let create_users =
    {|
CREATE TABLE IF NOT EXISTS users(
id SERIAL PRIMARY KEY,
email VARCHAR(40) NOT NULL UNIQUE
);
|}
  in
  let create_random_users =
    {|
INSERT INTO users(email)
SELECT
'user_' || seq || '@' || (
CASE (RANDOM() * 2)::INT
WHEN 0 THEN 'gmail'
WHEN 1 THEN 'hotmail'
WHEN 2 THEN 'yahoo'
END
) || '.com' AS email
FROM GENERATE_SERIES(1, 10) seq;
|}
  in
  Postgres_lwt.prepare ~statement:drop_users conn
  >>= fun () ->
  Postgres_lwt.execute (fun _ -> ()) conn
  >>= fun () ->
  Postgres_lwt.prepare ~statement:create_users conn
  >>= fun () ->
  Postgres_lwt.execute (fun _ -> ()) conn
  >>= fun () ->
  Postgres_lwt.prepare ~statement:create_random_users conn
  >>= fun () -> Postgres_lwt.execute (fun _ -> ()) conn
;;
(* Execute the previously prepared statement [statement_name] with the
   given ids (binary-encoded int32 parameters) and log each returned row.
   Rows are expected as exactly [id; email]; anything else is a bug. *)
let run statement_name conn ids =
  let parameters = make_parameters ids in
  (* If we use named prepared queries, we can reference them by name later on in the
     session lifecycle. *)
  Postgres_lwt.execute
    ~statement_name
    ~parameters
    (fun data_row ->
      match data_row with
      | [ Some id; Some name ] -> Logs.info (fun m -> m "Id: %s and email: %s" id name)
      | _ -> assert false)
    conn
;;
(* Full demo: (re)create and seed the users table, prepare a named query,
   run it four times — the [and*] arms share the connection and proceed
   concurrently — then close the connection. *)
let execute conn =
  let open Lwt_result.Syntax in
  let* () = setup conn in
  let name = "my_unique_query" in
  let* () = prepare_query name conn in
  let* () = run name conn [ 9l; 2l; 3l ]
  and* () = run name conn [ 2l; 4l; 10l ]
  and* () = run name conn [ 1l; 7l; 2l ]
  and* () = run name conn [ 78l; 11l; 6l ] in
  let+ () = Postgres_lwt.terminate conn in
  Logs.info (fun m -> m "Finished")
;;
(* Resolve connection parameters and run the demo. Note: this [run]
   shadows the query-runner [run] above; [execute] was defined earlier and
   still refers to the first one. *)
let run host port user password database ssl =
  let* user =
    match user with
    | None -> Lwt_unix.getlogin ()
    | Some u -> Lwt.return u
  in
  let tls_config =
    match ssl with
    | false -> None
    | true ->
      (* NOTE(review): this authenticator accepts any certificate, so the
         connection is encrypted but NOT authenticated — fine for a demo,
         do not copy into production code. *)
      let authenticator ~host:_ _ = Ok None in
      Some (Tls.Config.client ~authenticator ())
  in
  (* Default the database name to the user name. *)
  let database = Option.value ~default:user database in
  let user_info = Postgres.Connection.User_info.make ~user ~password ~database () in
  let open Lwt_result.Infix in
  Postgres_lwt_unix.(connect ?tls_config user_info (Inet (host, port)))
  >>= fun conn -> execute conn
;;
(* Cmdliner term wiring the CLI flags (host, port, user, password,
   database, ssl) to [run]. Defaults: localhost:5432, empty password;
   user and database are resolved at runtime inside [run]. *)
let cmd =
  let open Cmdliner in
  let port =
    let doc = "port number for the postgres server" in
    Arg.(value & opt int 5432 & info [ "p"; "port" ] ~doc)
  in
  let host =
    let doc = "hostname for the postgres server" in
    Arg.(value & opt string "localhost" & info [ "host" ] ~doc)
  in
  let user =
    let doc = "postgres user" in
    Arg.(value & opt (some string) None & info [ "user" ] ~doc)
  in
  let password =
    let doc = "postgres password" in
    Arg.(value & opt string "" & info [ "password" ] ~doc)
  in
  let database =
    let doc = "postgres database" in
    Arg.(value & opt (some string) None & info [ "database" ] ~doc)
  in
  let ssl =
    let doc = "setup a tls encrypted connection" in
    Arg.(value & flag & info [ "ssl" ] ~doc)
  in
  let doc = "Postgres example" in
  let term = Term.(const run $ host $ port $ user $ password $ database $ ssl) in
  term, Term.info "postgres_lwt" ~doc
;;
(* Evaluate the command line and drive the Lwt main loop. Query-level
   errors surface as [Error err] and are re-raised; CLI evaluation
   failures exit with status 1. *)
let run_cmd cmd =
  let open Cmdliner in
  let open Lwt.Infix in
  match Term.eval cmd with
  | `Ok res ->
    let p =
      res
      >>= function
      | Ok () -> Lwt.return ()
      | Error err -> Postgres.Error.raise err
    in
    Lwt_main.run p
  | _ -> exit 1
;;
(* Seed the RNG used by the TLS stack before any connection is attempted. *)
let () = Mirage_crypto_rng_unix.initialize ()

(* Configure logging to the terminal at Info level, then run the CLI. *)
let () =
  Logs.set_reporter (Logs_fmt.reporter ());
  Logs.set_level ~all:true (Some Info);
  Fmt_tty.setup_std_outputs ();
  run_cmd cmd
;;
| null | https://raw.githubusercontent.com/anuragsoni/postgres-protocol/e3e1743c138e527a2285dfd1eb240f49fd764b25/example/lwt/test_driver_lwt.ml | ocaml | If we use named prepared queries, we can reference them by name later on in the
session lifecycle. | open Lwt.Syntax
let make_parameters ids =
List.to_seq ids
|> Seq.map (fun id ->
let b = Bytes.create 4 in
Caml.Bytes.set_int32_be b 0 id;
`Binary, Some (Bytes.to_string b))
|> Array.of_seq
;;
let prepare_query name conn =
Postgres_lwt.prepare
~name
~statement:"SELECT id, email from users where id IN ($1, $2, $3)"
conn
;;
let setup conn =
let open Lwt_result.Infix in
let drop_users = "DROP TABLE IF EXISTS users;" in
let create_users =
{|
CREATE TABLE IF NOT EXISTS users(
id SERIAL PRIMARY KEY,
email VARCHAR(40) NOT NULL UNIQUE
);
|}
in
let create_random_users =
{|
INSERT INTO users(email)
SELECT
'user_' || seq || '@' || (
CASE (RANDOM() * 2)::INT
WHEN 0 THEN 'gmail'
WHEN 1 THEN 'hotmail'
WHEN 2 THEN 'yahoo'
END
) || '.com' AS email
FROM GENERATE_SERIES(1, 10) seq;
|}
in
Postgres_lwt.prepare ~statement:drop_users conn
>>= fun () ->
Postgres_lwt.execute (fun _ -> ()) conn
>>= fun () ->
Postgres_lwt.prepare ~statement:create_users conn
>>= fun () ->
Postgres_lwt.execute (fun _ -> ()) conn
>>= fun () ->
Postgres_lwt.prepare ~statement:create_random_users conn
>>= fun () -> Postgres_lwt.execute (fun _ -> ()) conn
;;
let run statement_name conn ids =
let parameters = make_parameters ids in
Postgres_lwt.execute
~statement_name
~parameters
(fun data_row ->
match data_row with
| [ Some id; Some name ] -> Logs.info (fun m -> m "Id: %s and email: %s" id name)
| _ -> assert false)
conn
;;
let execute conn =
let open Lwt_result.Syntax in
let* () = setup conn in
let name = "my_unique_query" in
let* () = prepare_query name conn in
let* () = run name conn [ 9l; 2l; 3l ]
and* () = run name conn [ 2l; 4l; 10l ]
and* () = run name conn [ 1l; 7l; 2l ]
and* () = run name conn [ 78l; 11l; 6l ] in
let+ () = Postgres_lwt.terminate conn in
Logs.info (fun m -> m "Finished")
;;
let run host port user password database ssl =
let* user =
match user with
| None -> Lwt_unix.getlogin ()
| Some u -> Lwt.return u
in
let tls_config =
match ssl with
| false -> None
| true ->
let authenticator ~host:_ _ = Ok None in
Some (Tls.Config.client ~authenticator ())
in
let database = Option.value ~default:user database in
let user_info = Postgres.Connection.User_info.make ~user ~password ~database () in
let open Lwt_result.Infix in
Postgres_lwt_unix.(connect ?tls_config user_info (Inet (host, port)))
>>= fun conn -> execute conn
;;
let cmd =
let open Cmdliner in
let port =
let doc = "port number for the postgres server" in
Arg.(value & opt int 5432 & info [ "p"; "port" ] ~doc)
in
let host =
let doc = "hostname for the postgres server" in
Arg.(value & opt string "localhost" & info [ "host" ] ~doc)
in
let user =
let doc = "postgres user" in
Arg.(value & opt (some string) None & info [ "user" ] ~doc)
in
let password =
let doc = "postgres password" in
Arg.(value & opt string "" & info [ "password" ] ~doc)
in
let database =
let doc = "postgres database" in
Arg.(value & opt (some string) None & info [ "database" ] ~doc)
in
let ssl =
let doc = "setup a tls encrypted connection" in
Arg.(value & flag & info [ "ssl" ] ~doc)
in
let doc = "Postgres example" in
let term = Term.(const run $ host $ port $ user $ password $ database $ ssl) in
term, Term.info "postgres_lwt" ~doc
;;
let run_cmd cmd =
let open Cmdliner in
let open Lwt.Infix in
match Term.eval cmd with
| `Ok res ->
let p =
res
>>= function
| Ok () -> Lwt.return ()
| Error err -> Postgres.Error.raise err
in
Lwt_main.run p
| _ -> exit 1
;;
let () = Mirage_crypto_rng_unix.initialize ()
let () =
Logs.set_reporter (Logs_fmt.reporter ());
Logs.set_level ~all:true (Some Info);
Fmt_tty.setup_std_outputs ();
run_cmd cmd
;;
|
3e43081a36dd459ddac30714a0a208c2434a7e256aa82d82b060affe38ba1602 | twittner/cql-io | PrepQuery.hs | This Source Code Form is subject to the terms of the Mozilla Public
License , v. 2.0 . If a copy of the MPL was not distributed with this
file , You can obtain one at /.
module Database.CQL.IO.PrepQuery
( PrepQuery
, prepared
, queryString
, PreparedQueries
, new
, lookupQueryId
, lookupQueryString
, insert
, delete
, queryStrings
) where
import Control.Applicative
import Control.Concurrent.STM
import Control.Monad
import Crypto.Hash.SHA1
import Data.ByteString (ByteString)
import Data.Text.Lazy (Text)
import Data.Text.Lazy.Encoding (encodeUtf8)
import Data.Foldable (for_)
import Data.Map.Strict (Map)
import Data.String
import Database.CQL.Protocol hiding (Map)
import Database.CQL.IO.Types (HashCollision (..))
import Prelude
import qualified Data.Map.Strict as M
-----------------------------------------------------------------------------
-- Prepared Query
-- | Representation of a prepared query.
-- Actual preparation is handled transparently by the driver.
data PrepQuery k a b = PrepQuery
{ pqStr :: !(QueryString k a b)
, pqId :: !PrepQueryId
}
instance IsString (PrepQuery k a b) where
fromString = prepared . fromString
newtype PrepQueryId = PrepQueryId ByteString deriving (Eq, Ord)
prepared :: QueryString k a b -> PrepQuery k a b
prepared q = PrepQuery q $ PrepQueryId (hashlazy . encodeUtf8 . unQueryString $ q)
queryString :: PrepQuery k a b -> QueryString k a b
queryString = pqStr
-----------------------------------------------------------------------------
-- Map of prepared queries to their query ID and query string
newtype QST = QST { unQST :: Text }
newtype QID = QID { unQID :: ByteString } deriving (Eq, Ord)
data PreparedQueries = PreparedQueries
{ queryMap :: !(TVar (Map PrepQueryId (QID, QST)))
, qid2Str :: !(TVar (Map QID QST))
}
new :: IO PreparedQueries
new = PreparedQueries <$> newTVarIO M.empty <*> newTVarIO M.empty
lookupQueryId :: PrepQuery k a b -> PreparedQueries -> STM (Maybe (QueryId k a b))
lookupQueryId q m = do
qm <- readTVar (queryMap m)
return $ QueryId . unQID . fst <$> M.lookup (pqId q) qm
lookupQueryString :: QueryId k a b -> PreparedQueries -> STM (Maybe (QueryString k a b))
lookupQueryString q m = do
qm <- readTVar (qid2Str m)
return $ QueryString . unQST <$> M.lookup (QID $ unQueryId q) qm
insert :: PrepQuery k a b -> QueryId k a b -> PreparedQueries -> STM ()
insert q i m = do
qq <- M.lookup (pqId q) <$> readTVar (queryMap m)
for_ qq (verify . snd)
modifyTVar' (queryMap m) $
M.insert (pqId q) (QID $ unQueryId i, QST $ unQueryString (pqStr q))
modifyTVar' (qid2Str m) $
M.insert (QID $ unQueryId i) (QST $ unQueryString (pqStr q))
where
verify qs =
unless (unQST qs == unQueryString (pqStr q)) $ do
let a = unQST qs
let b = unQueryString (pqStr q)
throwSTM (HashCollision a b)
delete :: PrepQuery k a b -> PreparedQueries -> STM ()
delete q m = do
qid <- M.lookup (pqId q) <$> readTVar (queryMap m)
modifyTVar' (queryMap m) $ M.delete (pqId q)
case qid of
Nothing -> return ()
Just i -> modifyTVar' (qid2Str m) $ M.delete (fst i)
queryStrings :: PreparedQueries -> STM [Text]
queryStrings m = map (unQST . snd) . M.elems <$> readTVar (queryMap m)
| null | https://raw.githubusercontent.com/twittner/cql-io/090b436a413d961a424376c0b1dcc0c223472188/src/Database/CQL/IO/PrepQuery.hs | haskell | ---------------------------------------------------------------------------
Prepared Query
| Representation of a prepared query.
Actual preparation is handled transparently by the driver.
---------------------------------------------------------------------------
Map of prepared queries to their query ID and query string | This Source Code Form is subject to the terms of the Mozilla Public
License , v. 2.0 . If a copy of the MPL was not distributed with this
file , You can obtain one at /.
module Database.CQL.IO.PrepQuery
( PrepQuery
, prepared
, queryString
, PreparedQueries
, new
, lookupQueryId
, lookupQueryString
, insert
, delete
, queryStrings
) where
import Control.Applicative
import Control.Concurrent.STM
import Control.Monad
import Crypto.Hash.SHA1
import Data.ByteString (ByteString)
import Data.Text.Lazy (Text)
import Data.Text.Lazy.Encoding (encodeUtf8)
import Data.Foldable (for_)
import Data.Map.Strict (Map)
import Data.String
import Database.CQL.Protocol hiding (Map)
import Database.CQL.IO.Types (HashCollision (..))
import Prelude
import qualified Data.Map.Strict as M
data PrepQuery k a b = PrepQuery
{ pqStr :: !(QueryString k a b)
, pqId :: !PrepQueryId
}
instance IsString (PrepQuery k a b) where
fromString = prepared . fromString
newtype PrepQueryId = PrepQueryId ByteString deriving (Eq, Ord)
prepared :: QueryString k a b -> PrepQuery k a b
prepared q = PrepQuery q $ PrepQueryId (hashlazy . encodeUtf8 . unQueryString $ q)
queryString :: PrepQuery k a b -> QueryString k a b
queryString = pqStr
newtype QST = QST { unQST :: Text }
newtype QID = QID { unQID :: ByteString } deriving (Eq, Ord)
data PreparedQueries = PreparedQueries
{ queryMap :: !(TVar (Map PrepQueryId (QID, QST)))
, qid2Str :: !(TVar (Map QID QST))
}
new :: IO PreparedQueries
new = PreparedQueries <$> newTVarIO M.empty <*> newTVarIO M.empty
lookupQueryId :: PrepQuery k a b -> PreparedQueries -> STM (Maybe (QueryId k a b))
lookupQueryId q m = do
qm <- readTVar (queryMap m)
return $ QueryId . unQID . fst <$> M.lookup (pqId q) qm
lookupQueryString :: QueryId k a b -> PreparedQueries -> STM (Maybe (QueryString k a b))
lookupQueryString q m = do
qm <- readTVar (qid2Str m)
return $ QueryString . unQST <$> M.lookup (QID $ unQueryId q) qm
insert :: PrepQuery k a b -> QueryId k a b -> PreparedQueries -> STM ()
insert q i m = do
qq <- M.lookup (pqId q) <$> readTVar (queryMap m)
for_ qq (verify . snd)
modifyTVar' (queryMap m) $
M.insert (pqId q) (QID $ unQueryId i, QST $ unQueryString (pqStr q))
modifyTVar' (qid2Str m) $
M.insert (QID $ unQueryId i) (QST $ unQueryString (pqStr q))
where
verify qs =
unless (unQST qs == unQueryString (pqStr q)) $ do
let a = unQST qs
let b = unQueryString (pqStr q)
throwSTM (HashCollision a b)
delete :: PrepQuery k a b -> PreparedQueries -> STM ()
delete q m = do
qid <- M.lookup (pqId q) <$> readTVar (queryMap m)
modifyTVar' (queryMap m) $ M.delete (pqId q)
case qid of
Nothing -> return ()
Just i -> modifyTVar' (qid2Str m) $ M.delete (fst i)
queryStrings :: PreparedQueries -> STM [Text]
queryStrings m = map (unQST . snd) . M.elems <$> readTVar (queryMap m)
|
de4349663124ca5975c2394a5daa3251c3f8dee501d70760e35f9537e60f78a2 | mirage/decompress | decompress.ml | let w = De.make_window ~bits:15
let l = De.Lz77.make_window ~bits:15
let o = De.bigstring_create De.io_buffer_size
let i = De.bigstring_create De.io_buffer_size
let q = De.Queue.create 4096
let str fmt = Format.asprintf fmt
let msgf fmt = Format.kasprintf (fun msg -> `Msg msg) fmt
let error_msgf fmt = Format.kasprintf (fun err -> Error (`Msg err)) fmt
let bigstring_input ic buf off len =
let tmp = Bytes.create len in
try
let len = input ic tmp 0 len in
for i = 0 to len - 1 do
buf.{off + i} <- Bytes.get tmp i
done
; len
with End_of_file -> 0
let bigstring_output oc buf off len =
let res = Bytes.create len in
for i = 0 to len - 1 do
Bytes.set res i buf.{off + i}
done
; output_string oc (Bytes.unsafe_to_string res)
let run_inflate ic oc =
let open De in
let decoder = Inf.decoder `Manual ~o ~w in
let rec go () =
match Inf.decode decoder with
| `Await ->
let len = bigstring_input ic i 0 io_buffer_size in
Inf.src decoder i 0 len ; go ()
| `Flush ->
let len = io_buffer_size - Inf.dst_rem decoder in
bigstring_output oc o 0 len
; Inf.flush decoder
; go ()
| `Malformed err -> `Error (false, str "%s." err)
| `End ->
let len = io_buffer_size - Inf.dst_rem decoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
go ()
let run_deflate ~level ic oc =
let open De in
let state = Lz77.state ~level ~q ~w:l (`Channel ic) in
let encoder = Def.encoder (`Channel oc) ~q in
let rec compress () =
match De.Lz77.compress state with
| `Await -> assert false
| `Flush ->
let literals = Lz77.literals state in
let distances = Lz77.distances state in
encode
@@ Def.encode encoder
(`Block
{
Def.kind=
Dynamic (Def.dynamic_of_frequencies ~literals ~distances)
; last= false
})
| `End ->
Queue.push_exn q Queue.eob
; pending @@ Def.encode encoder (`Block {Def.kind= Fixed; last= true})
and pending = function `Partial | `Block -> assert false | `Ok -> ()
and encode = function
| `Partial -> assert false
| `Ok | `Block -> compress () in
Def.dst encoder o 0 io_buffer_size
; compress ()
; `Ok 0
let run_zlib_inflate ic oc =
let open Zl in
let allocate bits = De.make_window ~bits in
let decoder = Inf.decoder `Manual ~o ~allocate in
let rec go decoder =
match Inf.decode decoder with
| `Await decoder ->
let len = bigstring_input ic i 0 De.io_buffer_size in
Inf.src decoder i 0 len |> go
| `Flush decoder ->
let len = De.io_buffer_size - Inf.dst_rem decoder in
bigstring_output oc o 0 len
; Inf.flush decoder |> go
| `Malformed err -> `Error (false, str "%s." err)
| `End decoder ->
let len = De.io_buffer_size - Inf.dst_rem decoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
go decoder
let run_zlib_deflate ~level ic oc =
let open Zl in
let encoder = Def.encoder `Manual `Manual ~q ~w:l ~level in
let rec go encoder =
match Def.encode encoder with
| `Await encoder ->
let len = bigstring_input ic i 0 De.io_buffer_size in
Def.src encoder i 0 len |> go
| `Flush encoder ->
let len = De.io_buffer_size - Def.dst_rem encoder in
bigstring_output oc o 0 len
; Def.dst encoder o 0 De.io_buffer_size |> go
| `End encoder ->
let len = De.io_buffer_size - Def.dst_rem encoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
Def.dst encoder o 0 De.io_buffer_size |> go
let run_gzip_inflate ic oc =
let open Gz in
let decoder = Inf.decoder `Manual ~o in
let rec go decoder =
match Inf.decode decoder with
| `Await decoder ->
let len = bigstring_input ic i 0 io_buffer_size in
Inf.src decoder i 0 len |> go
| `Flush decoder ->
let len = io_buffer_size - Inf.dst_rem decoder in
bigstring_output oc o 0 len
; Inf.flush decoder |> go
| `Malformed err -> `Error (false, str "%s." err)
| `End decoder ->
let len = io_buffer_size - Inf.dst_rem decoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
go decoder
let now () = Int32.of_float (Unix.gettimeofday ())
let run_gzip_deflate ~level ic oc =
let open Gz in
let encoder =
Def.encoder `Manual `Manual ~q ~w:l ~level ~mtime:(now ()) Gz.Unix in
let rec go encoder =
match Def.encode encoder with
| `Await encoder ->
let len = bigstring_input ic i 0 io_buffer_size in
Def.src encoder i 0 len |> go
| `Flush encoder ->
let len = io_buffer_size - Def.dst_rem encoder in
bigstring_output oc o 0 len
; Def.dst encoder o 0 io_buffer_size |> go
| `End encoder ->
let len = io_buffer_size - Def.dst_rem encoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
Def.dst encoder o 0 io_buffer_size |> go
external string_get_uint32 : string -> int -> int32 = "%caml_string_get32"
external bigstring_set_uint32 : Lzo.bigstring -> int -> int32 -> unit
= "%caml_bigstring_set32"
let string_get_uint8 str idx = Char.code (String.get str idx)
external bigstring_set_uint8 : Lzo.bigstring -> int -> int -> unit
= "%caml_ba_set_1"
let run_lzo_deflate ic oc =
let wrkmem = Lzo.make_wrkmem () in
let in_contents =
let buf = Buffer.create 0x1000 in
let tmp = Bytes.create 0x100 in
let rec go () =
match input ic tmp 0 (Bytes.length tmp) with
| 0 -> Buffer.contents buf
| len ->
Buffer.add_subbytes buf tmp 0 len
; go ()
| exception End_of_file -> Buffer.contents buf in
go () in
let in_contents =
let len = String.length in_contents in
let res = Bigarray.Array1.create Bigarray.char Bigarray.c_layout len in
let len0 = len land 3 in
let len1 = len asr 2 in
for i = 0 to len1 - 1 do
let i = i * 4 in
let v = string_get_uint32 in_contents i in
bigstring_set_uint32 res i v
done
; for i = 0 to len0 - 1 do
let i = (len1 * 4) + i in
let v = string_get_uint8 in_contents i in
bigstring_set_uint8 res i v
done
; res in
let out_contents =
Bigarray.(Array1.create char c_layout (Array1.dim in_contents * 2)) in
match Lzo.compress in_contents out_contents wrkmem with
| len ->
bigstring_output oc out_contents 0 len
; `Ok 0
| exception Invalid_argument _ -> assert false
let run_lzo_inflate ic oc =
let in_contents =
let buf = Buffer.create 0x1000 in
let tmp = Bytes.create 0x100 in
let rec go () =
match input ic tmp 0 (Bytes.length tmp) with
| 0 -> Buffer.contents buf
| len ->
Buffer.add_subbytes buf tmp 0 len
; go ()
| exception End_of_file -> Buffer.contents buf in
go () in
let in_contents =
let len = String.length in_contents in
let res = Bigarray.Array1.create Bigarray.char Bigarray.c_layout len in
let len0 = len land 3 in
let len1 = len asr 2 in
for i = 0 to len1 - 1 do
let i = i * 4 in
let v = string_get_uint32 in_contents i in
bigstring_set_uint32 res i v
done
; for i = 0 to len0 - 1 do
let i = (len1 * 4) + i in
let v = string_get_uint8 in_contents i in
bigstring_set_uint8 res i v
done
; res in
match Lzo.uncompress_with_buffer in_contents with
| Ok str -> output_string oc str ; `Ok 0
| Error err -> `Error (false, str "%a." Lzo.pp_error err)
let run deflate format level filename_ic filename_oc =
let ic, close_ic =
match filename_ic with
| Some filename ->
let ic = open_in_bin filename in
ic, fun () -> close_in ic
| None -> stdin, ignore in
let oc, close_oc =
match filename_oc with
| Some filename ->
let oc = open_out_bin filename in
oc, fun () -> close_out oc
| None -> stdout, ignore in
let res =
match deflate, format with
| true, `Deflate -> run_deflate ~level ic oc
| false, `Deflate -> run_inflate ic oc
| true, `Zlib -> run_zlib_deflate ~level ic oc
| false, `Zlib -> run_zlib_inflate ic oc
| true, `Gzip -> run_gzip_deflate ~level ic oc
| false, `Gzip -> run_gzip_inflate ic oc
| true, `Lzo -> run_lzo_deflate ic oc
| false, `Lzo -> run_lzo_inflate ic oc in
close_ic () ; close_oc () ; res
open Cmdliner
let deflate =
let doc = "Ask to deflate inputs (instead of inflate)." in
Arg.(value & flag & info ["d"] ~doc)
let format =
let parser s =
match String.lowercase_ascii s with
| "zlib" -> Ok `Zlib
| "gzip" -> Ok `Gzip
| "deflate" -> Ok `Deflate
| "lzo" -> Ok `Lzo
| x -> error_msgf "Invalid format: %S" x in
let pp ppf = function
| `Zlib -> Format.pp_print_string ppf "zlib"
| `Gzip -> Format.pp_print_string ppf "gzip"
| `Deflate -> Format.pp_print_string ppf "deflate"
| `Lzo -> Format.pp_print_string ppf "lzo" in
let format = Arg.conv (parser, pp) in
Arg.(value & opt format `Deflate & info ["f"; "format"] ~docv:"<format>")
let input = Arg.(value & pos 0 (some file) None & info [] ~docv:"<filename>")
let output = Arg.(value & pos 1 (some string) None & info [] ~docv:"<filename>")
let level =
let parser str =
match int_of_string str with
| n when n >= 0 -> Ok n
| _ -> Error (`Msg "The compression level must be positive")
| exception _ -> Error (`Msg "Invalid level") in
let positive_int = Arg.conv (parser, Format.pp_print_int) in
Arg.(value & opt positive_int 4 & info ["l"; "level"] ~docv:"<level>")
let command =
let doc =
"A tool to deflate/inflate a stream/file throught a specified format." in
let man =
[
`S Manpage.s_description
; `P
"$(tname) reads from the standard input and writes the \
deflated/inflated data to the standard output. Several formats \
exists:"
; `I
( "DEFLATE"
, "DEFLATE is a lossless data compression file format that uses a \
combination of LZ77 and Huffman coding. It is specified in RFC 1951 \
<>." ); `Noblank
; `I
( "GZip"
, "GZip is a file format based on the DEFLATE algorithm, which is a \
combination of LZ77 and Huffman coding. It encodes few informations \
such as: the timestamp, the filename, or the operating system \
(which operates the deflation). It generates a CRC-32 checksum at \
the end of the stream. It is described by the RFC 1952 \
<>." ); `Noblank
; `I
( "Zlib"
, "Zlib is an $(i,abstraction) of the DEFLATE algorithm compression \
algorithm which terminates the stream with an ADLER-32 checksum." )
; `Noblank
; `I
( "Lempel-Ziv-Overhumer (LZO)"
, "Lempel-Ziv-Oberhumer is a lossless data compression algorithm that \
is focused on decompression speed." ); `S Manpage.s_examples
; `P
"This is a small example of how to use $(tname) in your favorite shell:"
; `Pre
"\\$ $(tname) -f gzip -d <<EOF > file.gz\n\
Hello World!\n\
EOF\n\
\\$ $(tname) -f gzip < file.gz\n\
Hello World!\n\
\\$"; `S Manpage.s_bugs
; `P "Check bug reports at <>"
] in
let term = Term.(ret (const run $ deflate $ format $ level $ input $ output))
and info = Cmd.info "decompress" ~doc ~man in
Cmd.v info term
let () = exit (Cmd.eval' command)
| null | https://raw.githubusercontent.com/mirage/decompress/716f7e37720b8b9684ed5a1aedc75c69071fd0fc/bin/decompress.ml | ocaml | let w = De.make_window ~bits:15
let l = De.Lz77.make_window ~bits:15
let o = De.bigstring_create De.io_buffer_size
let i = De.bigstring_create De.io_buffer_size
let q = De.Queue.create 4096
let str fmt = Format.asprintf fmt
let msgf fmt = Format.kasprintf (fun msg -> `Msg msg) fmt
let error_msgf fmt = Format.kasprintf (fun err -> Error (`Msg err)) fmt
let bigstring_input ic buf off len =
let tmp = Bytes.create len in
try
let len = input ic tmp 0 len in
for i = 0 to len - 1 do
buf.{off + i} <- Bytes.get tmp i
done
; len
with End_of_file -> 0
let bigstring_output oc buf off len =
let res = Bytes.create len in
for i = 0 to len - 1 do
Bytes.set res i buf.{off + i}
done
; output_string oc (Bytes.unsafe_to_string res)
let run_inflate ic oc =
let open De in
let decoder = Inf.decoder `Manual ~o ~w in
let rec go () =
match Inf.decode decoder with
| `Await ->
let len = bigstring_input ic i 0 io_buffer_size in
Inf.src decoder i 0 len ; go ()
| `Flush ->
let len = io_buffer_size - Inf.dst_rem decoder in
bigstring_output oc o 0 len
; Inf.flush decoder
; go ()
| `Malformed err -> `Error (false, str "%s." err)
| `End ->
let len = io_buffer_size - Inf.dst_rem decoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
go ()
let run_deflate ~level ic oc =
let open De in
let state = Lz77.state ~level ~q ~w:l (`Channel ic) in
let encoder = Def.encoder (`Channel oc) ~q in
let rec compress () =
match De.Lz77.compress state with
| `Await -> assert false
| `Flush ->
let literals = Lz77.literals state in
let distances = Lz77.distances state in
encode
@@ Def.encode encoder
(`Block
{
Def.kind=
Dynamic (Def.dynamic_of_frequencies ~literals ~distances)
; last= false
})
| `End ->
Queue.push_exn q Queue.eob
; pending @@ Def.encode encoder (`Block {Def.kind= Fixed; last= true})
and pending = function `Partial | `Block -> assert false | `Ok -> ()
and encode = function
| `Partial -> assert false
| `Ok | `Block -> compress () in
Def.dst encoder o 0 io_buffer_size
; compress ()
; `Ok 0
let run_zlib_inflate ic oc =
let open Zl in
let allocate bits = De.make_window ~bits in
let decoder = Inf.decoder `Manual ~o ~allocate in
let rec go decoder =
match Inf.decode decoder with
| `Await decoder ->
let len = bigstring_input ic i 0 De.io_buffer_size in
Inf.src decoder i 0 len |> go
| `Flush decoder ->
let len = De.io_buffer_size - Inf.dst_rem decoder in
bigstring_output oc o 0 len
; Inf.flush decoder |> go
| `Malformed err -> `Error (false, str "%s." err)
| `End decoder ->
let len = De.io_buffer_size - Inf.dst_rem decoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
go decoder
let run_zlib_deflate ~level ic oc =
let open Zl in
let encoder = Def.encoder `Manual `Manual ~q ~w:l ~level in
let rec go encoder =
match Def.encode encoder with
| `Await encoder ->
let len = bigstring_input ic i 0 De.io_buffer_size in
Def.src encoder i 0 len |> go
| `Flush encoder ->
let len = De.io_buffer_size - Def.dst_rem encoder in
bigstring_output oc o 0 len
; Def.dst encoder o 0 De.io_buffer_size |> go
| `End encoder ->
let len = De.io_buffer_size - Def.dst_rem encoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
Def.dst encoder o 0 De.io_buffer_size |> go
let run_gzip_inflate ic oc =
let open Gz in
let decoder = Inf.decoder `Manual ~o in
let rec go decoder =
match Inf.decode decoder with
| `Await decoder ->
let len = bigstring_input ic i 0 io_buffer_size in
Inf.src decoder i 0 len |> go
| `Flush decoder ->
let len = io_buffer_size - Inf.dst_rem decoder in
bigstring_output oc o 0 len
; Inf.flush decoder |> go
| `Malformed err -> `Error (false, str "%s." err)
| `End decoder ->
let len = io_buffer_size - Inf.dst_rem decoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
go decoder
let now () = Int32.of_float (Unix.gettimeofday ())
let run_gzip_deflate ~level ic oc =
let open Gz in
let encoder =
Def.encoder `Manual `Manual ~q ~w:l ~level ~mtime:(now ()) Gz.Unix in
let rec go encoder =
match Def.encode encoder with
| `Await encoder ->
let len = bigstring_input ic i 0 io_buffer_size in
Def.src encoder i 0 len |> go
| `Flush encoder ->
let len = io_buffer_size - Def.dst_rem encoder in
bigstring_output oc o 0 len
; Def.dst encoder o 0 io_buffer_size |> go
| `End encoder ->
let len = io_buffer_size - Def.dst_rem encoder in
if len > 0 then bigstring_output oc o 0 len
; `Ok 0 in
Def.dst encoder o 0 io_buffer_size |> go
external string_get_uint32 : string -> int -> int32 = "%caml_string_get32"
external bigstring_set_uint32 : Lzo.bigstring -> int -> int32 -> unit
= "%caml_bigstring_set32"
let string_get_uint8 str idx = Char.code (String.get str idx)
external bigstring_set_uint8 : Lzo.bigstring -> int -> int -> unit
= "%caml_ba_set_1"
let run_lzo_deflate ic oc =
let wrkmem = Lzo.make_wrkmem () in
let in_contents =
let buf = Buffer.create 0x1000 in
let tmp = Bytes.create 0x100 in
let rec go () =
match input ic tmp 0 (Bytes.length tmp) with
| 0 -> Buffer.contents buf
| len ->
Buffer.add_subbytes buf tmp 0 len
; go ()
| exception End_of_file -> Buffer.contents buf in
go () in
let in_contents =
let len = String.length in_contents in
let res = Bigarray.Array1.create Bigarray.char Bigarray.c_layout len in
let len0 = len land 3 in
let len1 = len asr 2 in
for i = 0 to len1 - 1 do
let i = i * 4 in
let v = string_get_uint32 in_contents i in
bigstring_set_uint32 res i v
done
; for i = 0 to len0 - 1 do
let i = (len1 * 4) + i in
let v = string_get_uint8 in_contents i in
bigstring_set_uint8 res i v
done
; res in
let out_contents =
Bigarray.(Array1.create char c_layout (Array1.dim in_contents * 2)) in
match Lzo.compress in_contents out_contents wrkmem with
| len ->
bigstring_output oc out_contents 0 len
; `Ok 0
| exception Invalid_argument _ -> assert false
let run_lzo_inflate ic oc =
let in_contents =
let buf = Buffer.create 0x1000 in
let tmp = Bytes.create 0x100 in
let rec go () =
match input ic tmp 0 (Bytes.length tmp) with
| 0 -> Buffer.contents buf
| len ->
Buffer.add_subbytes buf tmp 0 len
; go ()
| exception End_of_file -> Buffer.contents buf in
go () in
let in_contents =
let len = String.length in_contents in
let res = Bigarray.Array1.create Bigarray.char Bigarray.c_layout len in
let len0 = len land 3 in
let len1 = len asr 2 in
for i = 0 to len1 - 1 do
let i = i * 4 in
let v = string_get_uint32 in_contents i in
bigstring_set_uint32 res i v
done
; for i = 0 to len0 - 1 do
let i = (len1 * 4) + i in
let v = string_get_uint8 in_contents i in
bigstring_set_uint8 res i v
done
; res in
match Lzo.uncompress_with_buffer in_contents with
| Ok str -> output_string oc str ; `Ok 0
| Error err -> `Error (false, str "%a." Lzo.pp_error err)
let run deflate format level filename_ic filename_oc =
let ic, close_ic =
match filename_ic with
| Some filename ->
let ic = open_in_bin filename in
ic, fun () -> close_in ic
| None -> stdin, ignore in
let oc, close_oc =
match filename_oc with
| Some filename ->
let oc = open_out_bin filename in
oc, fun () -> close_out oc
| None -> stdout, ignore in
let res =
match deflate, format with
| true, `Deflate -> run_deflate ~level ic oc
| false, `Deflate -> run_inflate ic oc
| true, `Zlib -> run_zlib_deflate ~level ic oc
| false, `Zlib -> run_zlib_inflate ic oc
| true, `Gzip -> run_gzip_deflate ~level ic oc
| false, `Gzip -> run_gzip_inflate ic oc
| true, `Lzo -> run_lzo_deflate ic oc
| false, `Lzo -> run_lzo_inflate ic oc in
close_ic () ; close_oc () ; res
open Cmdliner
let deflate =
let doc = "Ask to deflate inputs (instead of inflate)." in
Arg.(value & flag & info ["d"] ~doc)
let format =
let parser s =
match String.lowercase_ascii s with
| "zlib" -> Ok `Zlib
| "gzip" -> Ok `Gzip
| "deflate" -> Ok `Deflate
| "lzo" -> Ok `Lzo
| x -> error_msgf "Invalid format: %S" x in
let pp ppf = function
| `Zlib -> Format.pp_print_string ppf "zlib"
| `Gzip -> Format.pp_print_string ppf "gzip"
| `Deflate -> Format.pp_print_string ppf "deflate"
| `Lzo -> Format.pp_print_string ppf "lzo" in
let format = Arg.conv (parser, pp) in
Arg.(value & opt format `Deflate & info ["f"; "format"] ~docv:"<format>")
let input = Arg.(value & pos 0 (some file) None & info [] ~docv:"<filename>")
let output = Arg.(value & pos 1 (some string) None & info [] ~docv:"<filename>")
let level =
let parser str =
match int_of_string str with
| n when n >= 0 -> Ok n
| _ -> Error (`Msg "The compression level must be positive")
| exception _ -> Error (`Msg "Invalid level") in
let positive_int = Arg.conv (parser, Format.pp_print_int) in
Arg.(value & opt positive_int 4 & info ["l"; "level"] ~docv:"<level>")
let command =
let doc =
"A tool to deflate/inflate a stream/file throught a specified format." in
let man =
[
`S Manpage.s_description
; `P
"$(tname) reads from the standard input and writes the \
deflated/inflated data to the standard output. Several formats \
exists:"
; `I
( "DEFLATE"
, "DEFLATE is a lossless data compression file format that uses a \
combination of LZ77 and Huffman coding. It is specified in RFC 1951 \
<>." ); `Noblank
; `I
( "GZip"
, "GZip is a file format based on the DEFLATE algorithm, which is a \
combination of LZ77 and Huffman coding. It encodes few informations \
such as: the timestamp, the filename, or the operating system \
(which operates the deflation). It generates a CRC-32 checksum at \
the end of the stream. It is described by the RFC 1952 \
<>." ); `Noblank
; `I
( "Zlib"
, "Zlib is an $(i,abstraction) of the DEFLATE algorithm compression \
algorithm which terminates the stream with an ADLER-32 checksum." )
; `Noblank
; `I
( "Lempel-Ziv-Overhumer (LZO)"
, "Lempel-Ziv-Oberhumer is a lossless data compression algorithm that \
is focused on decompression speed." ); `S Manpage.s_examples
; `P
"This is a small example of how to use $(tname) in your favorite shell:"
; `Pre
"\\$ $(tname) -f gzip -d <<EOF > file.gz\n\
Hello World!\n\
EOF\n\
\\$ $(tname) -f gzip < file.gz\n\
Hello World!\n\
\\$"; `S Manpage.s_bugs
; `P "Check bug reports at <>"
] in
let term = Term.(ret (const run $ deflate $ format $ level $ input $ output))
and info = Cmd.info "decompress" ~doc ~man in
Cmd.v info term
let () = exit (Cmd.eval' command)
| |
04fe40f06e037f91721f7cdca2f1d6c2d2a33cf121bb9b1876570ffe7b6eea60 | ghc/nofib | Main.hs |
- ( The Solid Modeller , written in Haskell )
-
- Copyright 1990,1991,1992,1993 Duncan Sinclair
-
- Permission to use , copy , modify , and distribute this software for any
- purpose and without fee is hereby granted , provided that the above
- copyright notice and this permission notice appear in all copies , and
- that my name not be used in advertising or publicity pertaining to this
- software without specific , written prior permission . I makes no
- representations about the suitability of this software for any purpose .
- It is provided ` ` as is '' without express or implied warranty .
-
- Duncan Sinclair 1993 .
-
- Main program .
-
- Fulsom (The Solid Modeller, written in Haskell)
-
- Copyright 1990,1991,1992,1993 Duncan Sinclair
-
- Permission to use, copy, modify, and distribute this software for any
- purpose and without fee is hereby granted, provided that the above
- copyright notice and this permission notice appear in all copies, and
- that my name not be used in advertising or publicity pertaining to this
- software without specific, written prior permission. I makes no
- representations about the suitability of this software for any purpose.
- It is provided ``as is'' without express or implied warranty.
-
- Duncan Sinclair 1993.
-
- Main program.
-
-}
module Main(main) where
import Shapes
import Raster
import Quad
import Oct
import Csg
import Interval
import Types
import Vector
import Kolor
import Matrix
import Patchlevel
import Control.Monad
import System.Environment
import System.IO
import NofibUtils
main = replicateM_ 1000 $ do
argv <- getArgs
let
n = case argv of
[a] -> read a
_ -> 7
hSetBinaryMode stdout True
print (hash (picture n))
picture n = go n pic
go :: Int -> Csg -> [Char]
go n = (cdraw n) . quadoct . (octcsg n)
| null | https://raw.githubusercontent.com/ghc/nofib/f34b90b5a6ce46284693119a06d1133908b11856/real/fulsom/Main.hs | haskell |
- ( The Solid Modeller , written in Haskell )
-
- Copyright 1990,1991,1992,1993 Duncan Sinclair
-
- Permission to use , copy , modify , and distribute this software for any
- purpose and without fee is hereby granted , provided that the above
- copyright notice and this permission notice appear in all copies , and
- that my name not be used in advertising or publicity pertaining to this
- software without specific , written prior permission . I makes no
- representations about the suitability of this software for any purpose .
- It is provided ` ` as is '' without express or implied warranty .
-
- Duncan Sinclair 1993 .
-
- Main program .
-
- Fulsom (The Solid Modeller, written in Haskell)
-
- Copyright 1990,1991,1992,1993 Duncan Sinclair
-
- Permission to use, copy, modify, and distribute this software for any
- purpose and without fee is hereby granted, provided that the above
- copyright notice and this permission notice appear in all copies, and
- that my name not be used in advertising or publicity pertaining to this
- software without specific, written prior permission. I makes no
- representations about the suitability of this software for any purpose.
- It is provided ``as is'' without express or implied warranty.
-
- Duncan Sinclair 1993.
-
- Main program.
-
-}
module Main(main) where
import Shapes
import Raster
import Quad
import Oct
import Csg
import Interval
import Types
import Vector
import Kolor
import Matrix
import Patchlevel
import Control.Monad
import System.Environment
import System.IO
import NofibUtils
main = replicateM_ 1000 $ do
argv <- getArgs
let
n = case argv of
[a] -> read a
_ -> 7
hSetBinaryMode stdout True
print (hash (picture n))
picture n = go n pic
go :: Int -> Csg -> [Char]
go n = (cdraw n) . quadoct . (octcsg n)
| |
73cd31bd7e49b1b030245c60ccedad6694c032a87996585eaff72c6b7d91e5ea | rmloveland/scheme48-0.53 | test.scm | Copyright ( c ) 1993 - 1999 by and . See file COPYING .
; ,config ,load debug/test.scm
(define-structure testing (export (test :syntax) lost?)
(open scheme signals handle conditions)
(begin
(define *lost?* #f)
(define (lost?) *lost?*)
(define (run-test string compare want thunk)
(let ((result
(call-with-current-continuation
(lambda (k)
(with-handler (lambda (condition punt)
(if (error? condition)
(k condition)
(punt)))
thunk)))))
(if (not (compare want result))
(begin (display "Test ") (write string) (display " failed.") (newline)
(display "Wanted ") (write want)
(display ", but got ") (write result) (display ".")
(newline)
(set! *lost?* #t)))))
(define-syntax test
(syntax-rules ()
((test ?string ?compare ?want ?exp)
(run-test ?string ?compare ?want (lambda () ?exp)))))
))
| null | https://raw.githubusercontent.com/rmloveland/scheme48-0.53/1ae4531fac7150bd2af42d124da9b50dd1b89ec1/scheme/debug/test.scm | scheme | ,config ,load debug/test.scm | Copyright ( c ) 1993 - 1999 by and . See file COPYING .
(define-structure testing (export (test :syntax) lost?)
(open scheme signals handle conditions)
(begin
(define *lost?* #f)
(define (lost?) *lost?*)
(define (run-test string compare want thunk)
(let ((result
(call-with-current-continuation
(lambda (k)
(with-handler (lambda (condition punt)
(if (error? condition)
(k condition)
(punt)))
thunk)))))
(if (not (compare want result))
(begin (display "Test ") (write string) (display " failed.") (newline)
(display "Wanted ") (write want)
(display ", but got ") (write result) (display ".")
(newline)
(set! *lost?* #t)))))
(define-syntax test
(syntax-rules ()
((test ?string ?compare ?want ?exp)
(run-test ?string ?compare ?want (lambda () ?exp)))))
))
|
9b1ae7442e0036a95673cce68e1e733e50b4ecf1cde1c0e38f01707272387c73 | haskell-tools/haskell-tools | Simplest.hs | module Refactor.InlineBinding.Simplest where
b = a
a = () | null | https://raw.githubusercontent.com/haskell-tools/haskell-tools/b1189ab4f63b29bbf1aa14af4557850064931e32/src/builtin-refactorings/examples/Refactor/InlineBinding/Simplest.hs | haskell | module Refactor.InlineBinding.Simplest where
b = a
a = () | |
5fca09de7b3b1f34f85aea4ed329a8dde0d899e920c6bd6719bb8827042437a0 | nojb/llvm-min-caml | gc.ml | open Prim
type atom =
| Var of Id.t
| Root of Id.t
type closure = { entry : Id.l * Type.t; actual_fv : atom list }
type t =
| Unit
| Bool of bool
| Int of int
| Float of float
| Prim of primitive * atom list
| If of atom * t * t
| Let of (atom * Type.t) * t * t
| Atom of atom
| MakeCls of closure
| AppCls of atom * atom list
| AppDir of Id.l * atom list
| LetTuple of (atom * Type.t) list * atom * t
| ExtArray of Id.l * Type.t
| ExtFunApp of Id.l * Type.t * atom list
let rec triggers = function
| Closure.Unit | Closure.Bool(_) | Closure.Int(_)
| Closure.Float(_) -> false
| Closure.Prim (Pmakearray, _)
| Closure.Prim (Pmaketuple, _) -> true
| Closure.Prim _ -> false
| Closure.If (_, e1, e2)
| Closure.Let (_, e1, e2) -> triggers e1 || triggers e2
| Closure.Var _ -> false
| Closure.MakeCls _ -> true
| Closure.AppCls _ | Closure.AppDir _ -> false
| Closure.LetTuple (_, _, e) -> triggers e
| Closure.ExtArray _ -> false
| Closure.ExtFunApp _ -> false
(* is this right? could an external function allocate memory!? *)
let remove_list l s =
List.fold_right S.remove l s
let rec roots e =
match e with
| Closure.Unit | Closure.Bool _ | Closure.Int _
| Closure.Float _ -> S.empty
| Closure.Prim (Pmakearray, xs)
| Closure.Prim (Pmaketuple, xs) -> S.of_list xs
| Closure.Prim _ -> S.empty
| Closure.If (_, e1, e2) ->
S.union (roots e1) (roots e2)
| Closure.Let ((id, _), e1, e2) when triggers e1 ->
S.remove id (S.union (roots e1) (Closure.fv e2))
| Closure.Let ((id, _), _, e2) ->
S.remove id (roots e2)
| Closure.Var _ -> S.empty
| Closure.MakeCls (clos) ->
S.of_list clos.Closure.actual_fv
| Closure.AppCls _ | Closure.AppDir _ -> S.empty
| Closure.LetTuple (idtl, _, e) ->
remove_list (List.map (fun (id, _) -> id) idtl) (roots e)
| Closure.ExtArray _ | Closure.ExtFunApp _ -> S.empty
let is_gc_type = function
| Type.Fun _ | Type.Tuple _ | Type.Array _ -> true
| _ -> false
let add x t env r =
if S.mem x r && is_gc_type t then
(Printf.eprintf "%s " x;
(Root x, M.add x true env))
else
(Var x, M.add x false env)
let g_atom env id =
let is_root = M.find id env in
if is_root then Root id
else Var id
let rec g env = function
| Closure.Unit -> Unit
| Closure.Bool(b) -> Bool(b)
| Closure.Int(n) -> Int(n)
| Closure.Float(f) -> Float(f)
| Closure.Prim (p, xs) -> Prim (p, List.map (g_atom env) xs)
| Closure.If (x, e1, e2) ->
If (Var x, g env e1, g env e2)
| Closure.Let ((x, t), e1, e2) ->
let v, env' = add x t env (roots e2) in
Let ((v, t), g env e1, g env' e2)
| Closure.Var (x) ->
Atom (g_atom env x)
| Closure.MakeCls (clos) ->
MakeCls ({ entry = clos.Closure.entry; actual_fv =
List.map (g_atom env) clos.Closure.actual_fv })
| Closure.AppCls (x, idl) ->
AppCls (g_atom env x, List.map (g_atom env) idl)
| Closure.AppDir (x, idl) ->
AppDir (x, List.map (g_atom env) idl)
| Closure.LetTuple (idtl, x, e) ->
let x = g_atom env x in
let r = roots e in
let rec loop env yl = function
| [] ->
LetTuple (List.rev yl, x, g env e)
| (id, t) :: rest ->
let v, env' = add id t env r in
loop env' ((v, t) :: yl) rest
in loop env [] idtl
| Closure.ExtArray (x, t) ->
ExtArray (x, t)
| Closure.ExtFunApp (x, t, idl) ->
ExtFunApp (x, t, List.map (g_atom env) idl)
type fundef = { name : Id.l * Type.t;
args : (atom * Type.t) list;
formal_fv : (atom * Type.t) list;
body : t }
type prog = Prog of fundef list * t
let f_fundef fundef =
let r = roots fundef.Closure.body in
( let Id. L name , _ = fundef.Closure.name in
Printf.eprintf " roots for % s : % s\n " name
( String.concat " " ( S.elements r ) ) ) ;
Printf.eprintf "roots for %s: %s\n" name
(String.concat " " (S.elements r))); *)
let rec loop env yl = function
| [] ->
List.rev yl, env
| (x, t) :: rest ->
let v, env = add x t env r in
loop env ((v, t) :: yl) rest in
let Id.L name, _ = fundef.Closure.name in
Printf.eprintf "Roots for %s: " name;
let args, env = loop M.empty [] fundef.Closure.args in
let formal_fv, env = loop env [] fundef.Closure.formal_fv in
let body = g env fundef.Closure.body in
Printf.eprintf "\n";
{ name = fundef.Closure.name;
args = args; formal_fv = formal_fv;
body = body }
let f (Closure.Prog(fundefs, e)) =
Prog (List.map f_fundef fundefs, g M.empty e)
| null | https://raw.githubusercontent.com/nojb/llvm-min-caml/68703b905f8292cb2e20b41bbd90cfea85ca2a19/gc.ml | ocaml | is this right? could an external function allocate memory!? | open Prim
type atom =
| Var of Id.t
| Root of Id.t
type closure = { entry : Id.l * Type.t; actual_fv : atom list }
type t =
| Unit
| Bool of bool
| Int of int
| Float of float
| Prim of primitive * atom list
| If of atom * t * t
| Let of (atom * Type.t) * t * t
| Atom of atom
| MakeCls of closure
| AppCls of atom * atom list
| AppDir of Id.l * atom list
| LetTuple of (atom * Type.t) list * atom * t
| ExtArray of Id.l * Type.t
| ExtFunApp of Id.l * Type.t * atom list
let rec triggers = function
| Closure.Unit | Closure.Bool(_) | Closure.Int(_)
| Closure.Float(_) -> false
| Closure.Prim (Pmakearray, _)
| Closure.Prim (Pmaketuple, _) -> true
| Closure.Prim _ -> false
| Closure.If (_, e1, e2)
| Closure.Let (_, e1, e2) -> triggers e1 || triggers e2
| Closure.Var _ -> false
| Closure.MakeCls _ -> true
| Closure.AppCls _ | Closure.AppDir _ -> false
| Closure.LetTuple (_, _, e) -> triggers e
| Closure.ExtArray _ -> false
| Closure.ExtFunApp _ -> false
let remove_list l s =
List.fold_right S.remove l s
let rec roots e =
match e with
| Closure.Unit | Closure.Bool _ | Closure.Int _
| Closure.Float _ -> S.empty
| Closure.Prim (Pmakearray, xs)
| Closure.Prim (Pmaketuple, xs) -> S.of_list xs
| Closure.Prim _ -> S.empty
| Closure.If (_, e1, e2) ->
S.union (roots e1) (roots e2)
| Closure.Let ((id, _), e1, e2) when triggers e1 ->
S.remove id (S.union (roots e1) (Closure.fv e2))
| Closure.Let ((id, _), _, e2) ->
S.remove id (roots e2)
| Closure.Var _ -> S.empty
| Closure.MakeCls (clos) ->
S.of_list clos.Closure.actual_fv
| Closure.AppCls _ | Closure.AppDir _ -> S.empty
| Closure.LetTuple (idtl, _, e) ->
remove_list (List.map (fun (id, _) -> id) idtl) (roots e)
| Closure.ExtArray _ | Closure.ExtFunApp _ -> S.empty
let is_gc_type = function
| Type.Fun _ | Type.Tuple _ | Type.Array _ -> true
| _ -> false
let add x t env r =
if S.mem x r && is_gc_type t then
(Printf.eprintf "%s " x;
(Root x, M.add x true env))
else
(Var x, M.add x false env)
let g_atom env id =
let is_root = M.find id env in
if is_root then Root id
else Var id
let rec g env = function
| Closure.Unit -> Unit
| Closure.Bool(b) -> Bool(b)
| Closure.Int(n) -> Int(n)
| Closure.Float(f) -> Float(f)
| Closure.Prim (p, xs) -> Prim (p, List.map (g_atom env) xs)
| Closure.If (x, e1, e2) ->
If (Var x, g env e1, g env e2)
| Closure.Let ((x, t), e1, e2) ->
let v, env' = add x t env (roots e2) in
Let ((v, t), g env e1, g env' e2)
| Closure.Var (x) ->
Atom (g_atom env x)
| Closure.MakeCls (clos) ->
MakeCls ({ entry = clos.Closure.entry; actual_fv =
List.map (g_atom env) clos.Closure.actual_fv })
| Closure.AppCls (x, idl) ->
AppCls (g_atom env x, List.map (g_atom env) idl)
| Closure.AppDir (x, idl) ->
AppDir (x, List.map (g_atom env) idl)
| Closure.LetTuple (idtl, x, e) ->
let x = g_atom env x in
let r = roots e in
let rec loop env yl = function
| [] ->
LetTuple (List.rev yl, x, g env e)
| (id, t) :: rest ->
let v, env' = add id t env r in
loop env' ((v, t) :: yl) rest
in loop env [] idtl
| Closure.ExtArray (x, t) ->
ExtArray (x, t)
| Closure.ExtFunApp (x, t, idl) ->
ExtFunApp (x, t, List.map (g_atom env) idl)
type fundef = { name : Id.l * Type.t;
args : (atom * Type.t) list;
formal_fv : (atom * Type.t) list;
body : t }
type prog = Prog of fundef list * t
let f_fundef fundef =
let r = roots fundef.Closure.body in
( let Id. L name , _ = fundef.Closure.name in
Printf.eprintf " roots for % s : % s\n " name
( String.concat " " ( S.elements r ) ) ) ;
Printf.eprintf "roots for %s: %s\n" name
(String.concat " " (S.elements r))); *)
let rec loop env yl = function
| [] ->
List.rev yl, env
| (x, t) :: rest ->
let v, env = add x t env r in
loop env ((v, t) :: yl) rest in
let Id.L name, _ = fundef.Closure.name in
Printf.eprintf "Roots for %s: " name;
let args, env = loop M.empty [] fundef.Closure.args in
let formal_fv, env = loop env [] fundef.Closure.formal_fv in
let body = g env fundef.Closure.body in
Printf.eprintf "\n";
{ name = fundef.Closure.name;
args = args; formal_fv = formal_fv;
body = body }
let f (Closure.Prog(fundefs, e)) =
Prog (List.map f_fundef fundefs, g M.empty e)
|
cd9a64b124d6b6ce605b2d168da821774b7da4461e69ffe9cafef07d2b1eaae2 | igarnier/prbnmcn-dagger | lmh_incremental_inference.ml | module Trace = struct
type sample =
| Sample :
{ uid : int;
dist : 'a Dist.dist;
var : 'a Cgraph.Var.t;
score : Log_space.t
}
-> sample
| Kernel_sample :
{ uid : int;
dist : 'a Dist.kernel;
var : ('a * 'a) Cgraph.Var.t;
score : Log_space.t
}
-> sample
type sample_trace = sample list
type score = Score of { uid : int; score : Log_space.t }
type score_trace = score list
type t = { samples : sample_trace; scores : score_trace }
let empty = { samples = []; scores = [] }
let uid = function Sample { uid; _ } | Kernel_sample { uid; _ } -> uid
[@@inline]
let score = function
| Sample { score; _ } | Kernel_sample { score; _ } -> score
[@@inline]
module Internal_for_tests = struct
let rec equal_trace (trace1 : sample_trace) (trace2 : sample_trace) =
match (trace1, trace2) with
| ([], []) -> true
| ([], _) | (_, []) -> false
| (s1 :: tl1, s2 :: tl2) -> uid s1 = uid s2 && equal_trace tl1 tl2
let pp fmtr trace =
let open Format in
pp_print_list
~pp_sep:(fun fmtr () -> fprintf fmtr ", ")
(fun fmtr s ->
let uid = uid s in
fprintf fmtr "{%d}" uid)
fmtr
trace
end
[@@ocaml.warning "-32"]
let total_sample trace =
let rec loop list acc =
match list with
| [] -> acc
| hd :: tl -> loop tl (Log_space.mul (score hd) acc)
in
loop trace Log_space.one
let total_score trace =
let rec loop list acc =
match list with
| [] -> acc
| Score { score; _ } :: tl -> loop tl (Log_space.mul score acc)
in
loop trace Log_space.one
let total trace =
let total_sampling_score = total_sample trace.samples in
( total_sampling_score,
Log_space.mul total_sampling_score (total_score trace.scores) )
let cardinal { samples; _ } = List.length samples
let rec add_sample s trace =
match trace with
| [] -> [s]
| (Kernel_sample { uid = uid'; _ } as hd) :: tl
| (Sample { uid = uid'; _ } as hd) :: tl ->
let uid = uid s in
if uid < uid' then hd :: add_sample s tl
else if uid > uid' then s :: trace
else trace
let rec add_score (Score { uid; _ } as s) trace =
match trace with
| [] -> [s]
| (Score { uid = uid'; _ } as hd) :: tl ->
if uid < uid' then hd :: add_score s tl
else if uid > uid' then s :: trace
else trace
let add_sample (s : sample) trace =
{ trace with samples = add_sample s trace.samples }
let add_score (s : score) trace =
{ trace with scores = add_score s trace.scores }
let rec intersect_samples trace1 trace2 =
match (trace1, trace2) with
| ([], _) | (_, []) -> []
| (s1 :: tl1, s2 :: tl2) ->
let uid1 = uid s1 in
let uid2 = uid s2 in
if uid1 < uid2 then intersect_samples trace1 tl2
else if uid1 > uid2 then intersect_samples tl1 trace2
else s1 :: intersect_samples tl1 tl2
[@@ocaml.warning "-32"]
let rec union_samples trace1 trace2 =
match (trace1, trace2) with
| ([], t) | (t, []) -> t
| (s1 :: tl1, s2 :: tl2) ->
let uid1 = uid s1 in
let uid2 = uid s2 in
if uid1 < uid2 then s2 :: union_samples trace1 tl2
else if uid1 > uid2 then s1 :: union_samples tl1 trace2
else (* assert s1 = s2 *)
s1 :: union_samples tl1 tl2
let rec union_scores trace1 trace2 =
match (trace1, trace2) with
| ([], t) | (t, []) -> t
| ( (Score { uid = uid1; _ } as s1) :: tl1,
(Score { uid = uid2; _ } as s2) :: tl2 ) ->
if uid1 < uid2 then s2 :: union_scores trace1 tl2
else if uid1 > uid2 then s1 :: union_scores tl1 trace2
else (* assert s1 = s2 *)
s1 :: union_scores tl1 tl2
[@@ocaml.warning "-32"]
let union t1 t2 =
{ samples = union_samples t1.samples t2.samples;
scores = union_scores t1.scores t2.scores
}
end
module Counter = struct
let x = ref 0
let gen () =
let v = !x in
incr x ;
v
end
module Traced = Traced_monad.Make (Incremental_monad) (Trace)
module Syntax = struct
include Cps_monad.Make (Traced)
include Lmh_generic.Make (Traced)
type 'a t = (unit, 'a) m
type 'a shared = 'a Traced.t
let with_shared (m : 'a t) (f : 'a shared -> 'b t) : 'b t =
fun ~handler ->
let m = (m ~handler).cont (fun x () -> x) () in
{ cont = (fun k () -> (f m ~handler).cont k ()) }
(* This would work for any mappable container. *)
let with_shared_list (ms : 'a t list) (f : 'a shared list -> 'b t) : 'b t =
fun ~handler ->
let ms = List.map (fun m -> (m ~handler).cont (fun x () -> x) ()) ms in
{ cont = (fun k () -> (f ms ~handler).cont k ()) }
let with_shared_array (ms : 'a t array) (f : 'a shared array -> 'b t) : 'b t =
fun ~handler ->
let ms = Array.map (fun m -> (m ~handler).cont (fun x () -> x) ()) ms in
{ cont = (fun k -> (f ms ~handler).cont k) }
let use : 'a shared -> 'a t =
fun node ~handler:_ -> { cont = (fun k -> k node) }
module Make_shared (C : sig
type 'a t
val map : 'a t -> ('a -> 'b) -> 'b t
end) =
struct
let with_shared (ms : 'a t C.t) (f : 'a shared C.t -> 'b t) : 'b t =
fun ~handler ->
let ms = C.map ms (fun m -> (m ~handler).cont (fun x () -> x) ()) in
{ cont = (fun k -> (f ms ~handler).cont k) }
end
module Infix = struct
include Infix
let ( let*! ) = with_shared
let use = use
end
end
let handler : RNG.t -> unit Syntax.handler =
let open Syntax in
fun rng_state ->
{ handler =
(fun (type a) (dist : a Cps_monad.effect) k () ->
match dist with
| Dist dist -> (
match (Cgraph.get dist).value with
| Stateless { sample; ll = _ } ->
let pos = sample rng_state in
let var = Cgraph.Var.create pos in
let node = Cgraph.var var in
let node =
Cgraph.map2 node dist (fun sample dist ->
match dist.value with
| Kernel _ ->
(* A distribution can't dynamically switch from stateless to kernel
(TODO: lift this) *)
failwith
"Lmh_incremental_inference.handler: distribution \
switched from Stateless to Kernel"
| Stateless ({ ll; _ } as d) ->
let score = ll sample in
let uid = Counter.gen () in
Format.printf
* " reevaluating variable , fresh sample : % d@. "
* uid ;
* "reevaluating variable, fresh sample: %d@."
* uid ; *)
let trace =
Trace.add_sample
(Trace.Sample { uid; dist = d; var; score })
dist.trace
in
{ Traced.value = sample; trace })
in
k node ()
| Kernel ({ start; sample; ll = _ } as d) ->
let pos = sample start rng_state in
let var = Cgraph.Var.create (start, pos) in
let node = Cgraph.var var in
let node =
Cgraph.map2 node dist (fun (prev, current) dist ->
match dist.value with
| Stateless _ ->
(* A distribution can't dynamically switch from kernel to stateless
(TODO: lift this) *)
failwith
"Lmh_incremental_inference.handler: distribution \
switched from Kernel to Stateless"
| Kernel { ll; _ } ->
let score = ll prev current in
let uid = Counter.gen () in
let trace =
Trace.add_sample
(Trace.Kernel_sample
{ uid; dist = d; var; score })
dist.trace
in
{ Traced.value = current; trace })
in
k node ())
| Score m ->
k
(Cgraph.map m (fun { Traced.value = (value, score); trace } ->
let uid = Counter.gen () in
{ Traced.value;
trace =
Trace.add_score (Trace.Score { uid; score }) trace
}))
()
| _unknown_effect ->
invalid_arg "Lmh_incremental_inference: unknown effect")
}
type processed_trace =
{ trace : Trace.t;
card : int;
samples : Trace.sample array Lazy.t;
sampling_score : Log_space.t;
score : Log_space.t
}
let to_dot fname (model : 'a Syntax.t) =
let oc = open_out fname in
let handler = handler (RNG.make [| 0x1337; 0x533D |]) in
Cgraph.Internal.set_debug true ;
let model = (model ~handler).cont (fun x () -> x) () in
let _ = Cgraph.get model in
Cgraph.Internal.set_debug false ;
Cgraph.Internal.(to_dot ~mode:Full (Cgraph.ex (Obj.magic model)) oc) ;
close_out oc
let process : Trace.t -> processed_trace =
fun trace ->
let samples = lazy (Array.of_list trace.samples) in
let card = Trace.cardinal trace in
let (sampling_score, score) = Trace.total trace in
{ trace; card; samples; sampling_score; score }
let stream_samples (type a) (v : a Syntax.t) rng_state : a Seq.t =
let handler = handler rng_state in
let v = (v ~handler).cont (fun x () -> x) () in
let select_resampling ({ samples; card; _ } : processed_trace) rng_state =
if card = 0 then None
else
let samples = Lazy.force samples in
Some samples.(RNG.int rng_state card)
[@@inline]
in
let run (v : a Traced.t) = Cgraph.get v in
let { Traced.value = first_value; trace = first_trace } = run v in
let mcmc_move prev_value prev_trace _fwd_ll _bwd_ll undo =
let { Traced.value = new_value; trace = new_trace } = Cgraph.get v in
let new_trace = process new_trace in
let intersection =
Trace.intersect_samples prev_trace.trace.samples new_trace.trace.samples
in
Format.printf " old / new intersection : % a@. " Trace.pp intersection ;
let intersection_score = Trace.total_sample intersection in
let forward_sampling_score =
Log_space.mul
Log_space.one
(Log_space.div new_trace.sampling_score intersection_score)
in
let backward_sampling_score =
Log_space.mul
Log_space.one
(Log_space.div prev_trace.sampling_score intersection_score)
in
let forward_flow =
Log_space.(
mul
prev_trace.score
(mul
(of_float (1. /. float_of_int prev_trace.card))
forward_sampling_score))
in
let backward_flow =
Log_space.(
mul
new_trace.score
(mul
(of_float (1. /. float_of_int new_trace.card))
backward_sampling_score))
in
let ratio = Log_space.div backward_flow forward_flow in
let acceptance = Log_space.(min one ratio) in
if Log_space.lt (Log_space.of_float (RNG.float rng_state 1.0)) acceptance
then (new_value, new_trace)
else
let () = Cgraph.undo undo in
(prev_value, prev_trace)
in
let sample_step (prev_value : a) (prev_trace : processed_trace) rng_state =
match select_resampling prev_trace rng_state with
| None -> (prev_value, prev_trace)
| Some (Trace.Kernel_sample { uid = _; dist; var; score = _ }) ->
let (_previous, current) = Cgraph.Var.peek var in
let sample = dist.sample current rng_state in
let fwd_ll = dist.ll current sample in
let undo = Cgraph.Var.set_with_undo var (current, sample) in
let bwd_ll = dist.ll sample current in
mcmc_move prev_value prev_trace fwd_ll bwd_ll undo
| Some (Trace.Sample { uid = _; dist; var; score = bwd_ll }) ->
let (undo, fwd_ll) =
let sample = dist.sample rng_state in
let ll = dist.ll sample in
let undo = Cgraph.Var.set_with_undo var sample in
(undo, ll)
in
mcmc_move prev_value prev_trace fwd_ll bwd_ll undo
in
Seq.unfold
(fun (prev_value, prev_trace) ->
let next = sample_step prev_value prev_trace rng_state in
Some (prev_value, next))
(first_value, process first_trace)
include Syntax
module List_ops = Foldable.Make_list (Syntax)
module Seq_ops = Foldable.Make_seq (Syntax)
module Array_ops = Foldable.Make_array (Syntax)
| null | https://raw.githubusercontent.com/igarnier/prbnmcn-dagger/360e196be3e28cbcc67691a1fd68f1cd93743e35/src/lmh_incremental_inference.ml | ocaml | assert s1 = s2
assert s1 = s2
This would work for any mappable container.
A distribution can't dynamically switch from stateless to kernel
(TODO: lift this)
A distribution can't dynamically switch from kernel to stateless
(TODO: lift this) | module Trace = struct
type sample =
| Sample :
{ uid : int;
dist : 'a Dist.dist;
var : 'a Cgraph.Var.t;
score : Log_space.t
}
-> sample
| Kernel_sample :
{ uid : int;
dist : 'a Dist.kernel;
var : ('a * 'a) Cgraph.Var.t;
score : Log_space.t
}
-> sample
type sample_trace = sample list
type score = Score of { uid : int; score : Log_space.t }
type score_trace = score list
type t = { samples : sample_trace; scores : score_trace }
let empty = { samples = []; scores = [] }
let uid = function Sample { uid; _ } | Kernel_sample { uid; _ } -> uid
[@@inline]
let score = function
| Sample { score; _ } | Kernel_sample { score; _ } -> score
[@@inline]
module Internal_for_tests = struct
let rec equal_trace (trace1 : sample_trace) (trace2 : sample_trace) =
match (trace1, trace2) with
| ([], []) -> true
| ([], _) | (_, []) -> false
| (s1 :: tl1, s2 :: tl2) -> uid s1 = uid s2 && equal_trace tl1 tl2
let pp fmtr trace =
let open Format in
pp_print_list
~pp_sep:(fun fmtr () -> fprintf fmtr ", ")
(fun fmtr s ->
let uid = uid s in
fprintf fmtr "{%d}" uid)
fmtr
trace
end
[@@ocaml.warning "-32"]
let total_sample trace =
let rec loop list acc =
match list with
| [] -> acc
| hd :: tl -> loop tl (Log_space.mul (score hd) acc)
in
loop trace Log_space.one
let total_score trace =
let rec loop list acc =
match list with
| [] -> acc
| Score { score; _ } :: tl -> loop tl (Log_space.mul score acc)
in
loop trace Log_space.one
let total trace =
let total_sampling_score = total_sample trace.samples in
( total_sampling_score,
Log_space.mul total_sampling_score (total_score trace.scores) )
let cardinal { samples; _ } = List.length samples
let rec add_sample s trace =
match trace with
| [] -> [s]
| (Kernel_sample { uid = uid'; _ } as hd) :: tl
| (Sample { uid = uid'; _ } as hd) :: tl ->
let uid = uid s in
if uid < uid' then hd :: add_sample s tl
else if uid > uid' then s :: trace
else trace
let rec add_score (Score { uid; _ } as s) trace =
match trace with
| [] -> [s]
| (Score { uid = uid'; _ } as hd) :: tl ->
if uid < uid' then hd :: add_score s tl
else if uid > uid' then s :: trace
else trace
let add_sample (s : sample) trace =
{ trace with samples = add_sample s trace.samples }
let add_score (s : score) trace =
{ trace with scores = add_score s trace.scores }
let rec intersect_samples trace1 trace2 =
match (trace1, trace2) with
| ([], _) | (_, []) -> []
| (s1 :: tl1, s2 :: tl2) ->
let uid1 = uid s1 in
let uid2 = uid s2 in
if uid1 < uid2 then intersect_samples trace1 tl2
else if uid1 > uid2 then intersect_samples tl1 trace2
else s1 :: intersect_samples tl1 tl2
[@@ocaml.warning "-32"]
let rec union_samples trace1 trace2 =
match (trace1, trace2) with
| ([], t) | (t, []) -> t
| (s1 :: tl1, s2 :: tl2) ->
let uid1 = uid s1 in
let uid2 = uid s2 in
if uid1 < uid2 then s2 :: union_samples trace1 tl2
else if uid1 > uid2 then s1 :: union_samples tl1 trace2
s1 :: union_samples tl1 tl2
let rec union_scores trace1 trace2 =
match (trace1, trace2) with
| ([], t) | (t, []) -> t
| ( (Score { uid = uid1; _ } as s1) :: tl1,
(Score { uid = uid2; _ } as s2) :: tl2 ) ->
if uid1 < uid2 then s2 :: union_scores trace1 tl2
else if uid1 > uid2 then s1 :: union_scores tl1 trace2
s1 :: union_scores tl1 tl2
[@@ocaml.warning "-32"]
let union t1 t2 =
{ samples = union_samples t1.samples t2.samples;
scores = union_scores t1.scores t2.scores
}
end
module Counter = struct
let x = ref 0
let gen () =
let v = !x in
incr x ;
v
end
module Traced = Traced_monad.Make (Incremental_monad) (Trace)
module Syntax = struct
include Cps_monad.Make (Traced)
include Lmh_generic.Make (Traced)
type 'a t = (unit, 'a) m
type 'a shared = 'a Traced.t
let with_shared (m : 'a t) (f : 'a shared -> 'b t) : 'b t =
fun ~handler ->
let m = (m ~handler).cont (fun x () -> x) () in
{ cont = (fun k () -> (f m ~handler).cont k ()) }
let with_shared_list (ms : 'a t list) (f : 'a shared list -> 'b t) : 'b t =
fun ~handler ->
let ms = List.map (fun m -> (m ~handler).cont (fun x () -> x) ()) ms in
{ cont = (fun k () -> (f ms ~handler).cont k ()) }
let with_shared_array (ms : 'a t array) (f : 'a shared array -> 'b t) : 'b t =
fun ~handler ->
let ms = Array.map (fun m -> (m ~handler).cont (fun x () -> x) ()) ms in
{ cont = (fun k -> (f ms ~handler).cont k) }
let use : 'a shared -> 'a t =
fun node ~handler:_ -> { cont = (fun k -> k node) }
module Make_shared (C : sig
type 'a t
val map : 'a t -> ('a -> 'b) -> 'b t
end) =
struct
let with_shared (ms : 'a t C.t) (f : 'a shared C.t -> 'b t) : 'b t =
fun ~handler ->
let ms = C.map ms (fun m -> (m ~handler).cont (fun x () -> x) ()) in
{ cont = (fun k -> (f ms ~handler).cont k) }
end
module Infix = struct
include Infix
let ( let*! ) = with_shared
let use = use
end
end
let handler : RNG.t -> unit Syntax.handler =
let open Syntax in
fun rng_state ->
{ handler =
(fun (type a) (dist : a Cps_monad.effect) k () ->
match dist with
| Dist dist -> (
match (Cgraph.get dist).value with
| Stateless { sample; ll = _ } ->
let pos = sample rng_state in
let var = Cgraph.Var.create pos in
let node = Cgraph.var var in
let node =
Cgraph.map2 node dist (fun sample dist ->
match dist.value with
| Kernel _ ->
failwith
"Lmh_incremental_inference.handler: distribution \
switched from Stateless to Kernel"
| Stateless ({ ll; _ } as d) ->
let score = ll sample in
let uid = Counter.gen () in
Format.printf
* " reevaluating variable , fresh sample : % d@. "
* uid ;
* "reevaluating variable, fresh sample: %d@."
* uid ; *)
let trace =
Trace.add_sample
(Trace.Sample { uid; dist = d; var; score })
dist.trace
in
{ Traced.value = sample; trace })
in
k node ()
| Kernel ({ start; sample; ll = _ } as d) ->
let pos = sample start rng_state in
let var = Cgraph.Var.create (start, pos) in
let node = Cgraph.var var in
let node =
Cgraph.map2 node dist (fun (prev, current) dist ->
match dist.value with
| Stateless _ ->
failwith
"Lmh_incremental_inference.handler: distribution \
switched from Kernel to Stateless"
| Kernel { ll; _ } ->
let score = ll prev current in
let uid = Counter.gen () in
let trace =
Trace.add_sample
(Trace.Kernel_sample
{ uid; dist = d; var; score })
dist.trace
in
{ Traced.value = current; trace })
in
k node ())
| Score m ->
k
(Cgraph.map m (fun { Traced.value = (value, score); trace } ->
let uid = Counter.gen () in
{ Traced.value;
trace =
Trace.add_score (Trace.Score { uid; score }) trace
}))
()
| _unknown_effect ->
invalid_arg "Lmh_incremental_inference: unknown effect")
}
type processed_trace =
{ trace : Trace.t;
card : int;
samples : Trace.sample array Lazy.t;
sampling_score : Log_space.t;
score : Log_space.t
}
let to_dot fname (model : 'a Syntax.t) =
let oc = open_out fname in
let handler = handler (RNG.make [| 0x1337; 0x533D |]) in
Cgraph.Internal.set_debug true ;
let model = (model ~handler).cont (fun x () -> x) () in
let _ = Cgraph.get model in
Cgraph.Internal.set_debug false ;
Cgraph.Internal.(to_dot ~mode:Full (Cgraph.ex (Obj.magic model)) oc) ;
close_out oc
let process : Trace.t -> processed_trace =
fun trace ->
let samples = lazy (Array.of_list trace.samples) in
let card = Trace.cardinal trace in
let (sampling_score, score) = Trace.total trace in
{ trace; card; samples; sampling_score; score }
let stream_samples (type a) (v : a Syntax.t) rng_state : a Seq.t =
let handler = handler rng_state in
let v = (v ~handler).cont (fun x () -> x) () in
let select_resampling ({ samples; card; _ } : processed_trace) rng_state =
if card = 0 then None
else
let samples = Lazy.force samples in
Some samples.(RNG.int rng_state card)
[@@inline]
in
let run (v : a Traced.t) = Cgraph.get v in
let { Traced.value = first_value; trace = first_trace } = run v in
let mcmc_move prev_value prev_trace _fwd_ll _bwd_ll undo =
let { Traced.value = new_value; trace = new_trace } = Cgraph.get v in
let new_trace = process new_trace in
let intersection =
Trace.intersect_samples prev_trace.trace.samples new_trace.trace.samples
in
Format.printf " old / new intersection : % a@. " Trace.pp intersection ;
let intersection_score = Trace.total_sample intersection in
let forward_sampling_score =
Log_space.mul
Log_space.one
(Log_space.div new_trace.sampling_score intersection_score)
in
let backward_sampling_score =
Log_space.mul
Log_space.one
(Log_space.div prev_trace.sampling_score intersection_score)
in
let forward_flow =
Log_space.(
mul
prev_trace.score
(mul
(of_float (1. /. float_of_int prev_trace.card))
forward_sampling_score))
in
let backward_flow =
Log_space.(
mul
new_trace.score
(mul
(of_float (1. /. float_of_int new_trace.card))
backward_sampling_score))
in
let ratio = Log_space.div backward_flow forward_flow in
let acceptance = Log_space.(min one ratio) in
if Log_space.lt (Log_space.of_float (RNG.float rng_state 1.0)) acceptance
then (new_value, new_trace)
else
let () = Cgraph.undo undo in
(prev_value, prev_trace)
in
let sample_step (prev_value : a) (prev_trace : processed_trace) rng_state =
match select_resampling prev_trace rng_state with
| None -> (prev_value, prev_trace)
| Some (Trace.Kernel_sample { uid = _; dist; var; score = _ }) ->
let (_previous, current) = Cgraph.Var.peek var in
let sample = dist.sample current rng_state in
let fwd_ll = dist.ll current sample in
let undo = Cgraph.Var.set_with_undo var (current, sample) in
let bwd_ll = dist.ll sample current in
mcmc_move prev_value prev_trace fwd_ll bwd_ll undo
| Some (Trace.Sample { uid = _; dist; var; score = bwd_ll }) ->
let (undo, fwd_ll) =
let sample = dist.sample rng_state in
let ll = dist.ll sample in
let undo = Cgraph.Var.set_with_undo var sample in
(undo, ll)
in
mcmc_move prev_value prev_trace fwd_ll bwd_ll undo
in
Seq.unfold
(fun (prev_value, prev_trace) ->
let next = sample_step prev_value prev_trace rng_state in
Some (prev_value, next))
(first_value, process first_trace)
(* Bring the operators defined in [Syntax] into scope at the top level of
   this module, so users get them without opening [Syntax] themselves. *)
include Syntax
(* Foldable traversal helpers for lists, sequences and arrays, each
   instantiated with this module's [Syntax]. *)
module List_ops = Foldable.Make_list (Syntax)
module Seq_ops = Foldable.Make_seq (Syntax)
module Array_ops = Foldable.Make_array (Syntax)
|
3a85cce7465288cc682c115bb46a9f036f9f4de7883d34d7ac10689754dcf1f4 | pixlsus/registry.gimp.org_static | Dot-Map-Pixelize_No-Undo.scm | (script-fu-register
"script-fu-pixelraster-noundo" ;func name
"Brush-Dot-Pixelize-No-Undo" ;menu label
"Creates a color-averaged Dot-Map with a selected Brush\
and a matching masked copy of the active layer." ;description
"Zed Gecko" ;author
"2011, as free as possible" ;copyright notice
"June, 2011" ;date created
"RGB" ;image type that the script works on
SF-IMAGE "Image" 0
SF-DRAWABLE "Layer" 0
SF-BRUSH "Brush" '("Circle (03)" 100 44 0)
SF-ADJUSTMENT "Number of columns" '(10 2 500 1 1 0 0) ;fixed typo: was "colums"
SF-ADJUSTMENT "Number of rows" '(10 2 500 1 1 0 0)
)
(script-fu-menu-register "script-fu-pixelraster-noundo" "<Image>/Filters/Pixelize")
;; Render a brush-dot "pixelized" version of inLayer on a grid of
;; inColums x inRows cells:
;;  - theLayerA gets one brush dab per cell, colored with the averaged
;;    color picked from inLayer at the cell center
;;  - theLayerB is a copy of inLayer revealed only under the dabs via a
;;    white-on-black layer mask
;; Undo recording is disabled for the whole operation (hence "No-Undo").
(define (script-fu-pixelraster-noundo inImage inLayer inBrush inColums inRows)
(let*
(
;; dot layer: new layer at 60% opacity that receives the colored dabs
(theLayerA
(car
(gimp-layer-new
inImage
(car (gimp-image-width inImage))
(car (gimp-image-height inImage))
RGB-IMAGE
"dot-layer"
60
NORMAL
)
)
)
;; masked copy of the input layer
(theLayerB
(car
(gimp-layer-copy inLayer 0)
)
)
;; mask (initially black, i.e. fully hiding) for theLayerB
(theMaskB
(car
(gimp-layer-create-mask
theLayerB
1
)
)
)
;; width / height of one grid cell in pixels
(theXspacing
(/
(car (gimp-image-width inImage))
inColums
)
)
(theYspacing
(/
(car (gimp-image-height inImage))
inRows
)
)
;; current dab center, set inside the loops below
(theXposition)
(theYposition)
(counterX 0)
(counterY 0)
) ;---------- end of variable declarations
(gimp-image-undo-disable inImage)
(gimp-image-add-layer inImage theLayerB -1)
(gimp-image-add-layer inImage theLayerA -1)
(gimp-layer-add-mask theLayerB theMaskB)
(gimp-layer-add-alpha theLayerA)
(gimp-layer-add-alpha theLayerB)
(gimp-edit-clear theLayerA)
(gimp-drawable-set-name theLayerB "dot-BG-layer")
(gimp-context-set-brush (car inBrush))
;; iterate over the grid, one dab per cell center
(while (< counterX inColums)
(while (< counterY inRows)
(set! theXposition (+ (/ theXspacing 2) (* theXspacing counterX)))
(set! theYposition (+ (/ theYspacing 2) (* theYspacing counterY)))
;; pick the color averaged over a radius of half a cell width
(gimp-palette-set-foreground (car (gimp-image-pick-color inImage inLayer theXposition theYposition 0 1 (/ theXspacing 2))))
(gimp-paintbrush-default theLayerA 2 (my-float-array theXposition theYposition))
;; paint white on the mask to reveal theLayerB under the dab
(gimp-palette-set-foreground '(255 255 255))
(gimp-paintbrush-default theMaskB 2 (my-float-array theXposition theYposition))
(set! counterY (+ counterY 1))
)
(set! counterX (+ counterX 1))
(set! counterY 0)
(gimp-progress-update (/ counterX inColums))
)
;; force a redraw of everything that was painted
(gimp-drawable-update theLayerA 0 0 (car (gimp-image-width inImage))(car (gimp-image-height inImage)))
(gimp-drawable-update theMaskB 0 0 (car (gimp-image-width inImage))(car (gimp-image-height inImage)))
(gimp-drawable-update theLayerB 0 0 (car (gimp-image-width inImage))(car (gimp-image-height inImage)))
(gimp-image-undo-enable inImage)
)
)
;; Pack all numeric arguments into a freshly allocated 'double array,
;; in argument order, and return that array. Used to build the stroke
;; point list expected by gimp-paintbrush-default.
(define (my-float-array . values)
  (let ((result (cons-array (length values) 'double)))
    (let fill ((index 0) (rest values))
      (if (null? rest)
          result
          (begin
            (aset result index (car rest))
            (fill (+ index 1) (cdr rest)))))))
menu label
description
author
copyright notice
date created
image type that the script works on
----------Variablendeklaration Ende
| (script-fu-register
"Creates a color-averaged Dot-Map with a selected Brush\
SF-IMAGE "Image" 0
SF-DRAWABLE "Layer" 0
SF-BRUSH "Brush" '("Circle (03)" 100 44 0)
SF-ADJUSTMENT "Number of colums" '(10 2 500 1 1 0 0)
SF-ADJUSTMENT "Number of rows" '(10 2 500 1 1 0 0)
)
(script-fu-menu-register "script-fu-pixelraster-noundo" "<Image>/Filters/Pixelize")
(define (script-fu-pixelraster-noundo inImage inLayer inBrush inColums inRows)
(let*
(
(theLayerA
(car
(gimp-layer-new
inImage
(car (gimp-image-width inImage))
(car (gimp-image-height inImage))
RGB-IMAGE
"dot-layer"
60
NORMAL
)
)
)
(theLayerB
(car
(gimp-layer-copy inLayer 0)
)
)
(theMaskB
(car
(gimp-layer-create-mask
theLayerB
1
)
)
)
(theXspacing
(/
(car (gimp-image-width inImage))
inColums
)
)
(theYspacing
(/
(car (gimp-image-height inImage))
inRows
)
)
(theXposition)
(theYposition)
(counterX 0)
(counterY 0)
(gimp-image-undo-disable inImage)
(gimp-image-add-layer inImage theLayerB -1)
(gimp-image-add-layer inImage theLayerA -1)
(gimp-layer-add-mask theLayerB theMaskB)
(gimp-layer-add-alpha theLayerA)
(gimp-layer-add-alpha theLayerB)
(gimp-edit-clear theLayerA)
(gimp-drawable-set-name theLayerB "dot-BG-layer")
(gimp-context-set-brush (car inBrush))
(while (< counterX inColums)
(while (< counterY inRows)
(set! theXposition (+ (/ theXspacing 2) (* theXspacing counterX)))
(set! theYposition (+ (/ theYspacing 2) (* theYspacing counterY)))
(gimp-palette-set-foreground (car (gimp-image-pick-color inImage inLayer theXposition theYposition 0 1 (/ theXspacing 2))))
(gimp-paintbrush-default theLayerA 2 (my-float-array theXposition theYposition))
(gimp-palette-set-foreground '(255 255 255))
(gimp-paintbrush-default theMaskB 2 (my-float-array theXposition theYposition))
(set! counterY (+ counterY 1))
)
(set! counterX (+ counterX 1))
(set! counterY 0)
(gimp-progress-update (/ counterX inColums))
)
(gimp-drawable-update theLayerA 0 0 (car (gimp-image-width inImage))(car (gimp-image-height inImage)))
(gimp-drawable-update theMaskB 0 0 (car (gimp-image-width inImage))(car (gimp-image-height inImage)))
(gimp-drawable-update theLayerB 0 0 (car (gimp-image-width inImage))(car (gimp-image-height inImage)))
(gimp-image-undo-enable inImage)
)
)
(define my-float-array
(lambda stuff
(letrec ((kernel (lambda (array pos remainder)
(if (null? remainder) array
(begin
(aset array pos (car remainder))
(kernel array (+ pos 1) (cdr remainder)))))))
(kernel (cons-array (length stuff) 'double) 0 stuff)))) |
5b18331b1e6741559778db1619724038aa282c992aadffc1fee2ab69b2333634 | ocaml-sf/learn-ocaml-corpus | get1.ml | let rec get : 'a . int -> 'a seq -> 'a =
fun i xs ->
match xs with
| Nil ->
assert false (* cannot happen; [i] is within bounds *)
| One (x, xs) ->
(* wrong: [x] (the element at index 0) is never returned; a correct
   version would return [x] when [i = 0] and only otherwise recurse
   with [get (i - 1) (Zero xs)] *)
get (i - 1) (Zero xs) (* wrong *)
| Zero xs ->
(* elements are stored pairwise one level down: fetch the pair that
   holds index [i] and project the matching half *)
let (x0, x1) = get (i / 2) xs in
if i mod 2 = 0 then x0 else x1
| null | https://raw.githubusercontent.com/ocaml-sf/learn-ocaml-corpus/7dcf4d72b49863a3e37e41b3c3097aa4c6101a69/exercises/fpottier/random_access_lists/wrong/get1.ml | ocaml | cannot happen; [i] is within bounds
wrong | let rec get : 'a . int -> 'a seq -> 'a =
fun i xs ->
match xs with
| Nil ->
| One (x, xs) ->
| Zero xs ->
let (x0, x1) = get (i / 2) xs in
if i mod 2 = 0 then x0 else x1
|
273069a8705b2f05dbc829ebec77c212e0fc817896832a59dfa462e7f9de19ff | lisp/de.setf.xml | namespace.lisp | -*- Mode : lisp ; Syntax : ansi - common - lisp ; Base : 10 ; Package : xml - query - data - model ; -*-
;;; Auto-generated namespace registration: defines the "#" namespace in the
;;; XML query data model and mirrors it as a Lisp package.
(in-package :xml-query-data-model)
;; record the namespace object as the current *namespace*
(setq xml-query-data-model:*namespace*
(xml-query-data-model:defnamespace "#"
(:use)
(:nicknames)
(:export)
(:documentation nil)))
;; ensure a Lisp package named "#" exists and export its symbols
;; (the symbol list is empty here, so the dolist is a no-op)
(let ((xml-query-data-model::p
(or (find-package "#")
(make-package "#"
:use
nil
:nicknames
'nil))))
(dolist (xml-query-data-model::s 'nil)
(export (intern xml-query-data-model::s xml-query-data-model::p)
xml-query-data-model::p)))
;;; (xqdm:find-namespace "#" :if-does-not-exist :load)
(xqdm:find-namespace "#" :if-does-not-exist :load) |
(in-package :xml-query-data-model)
(setq xml-query-data-model:*namespace*
(xml-query-data-model:defnamespace "#"
(:use)
(:nicknames)
(:export)
(:documentation nil)))
(let ((xml-query-data-model::p
(or (find-package "#")
(make-package "#"
:use
nil
:nicknames
'nil))))
(dolist (xml-query-data-model::s 'nil)
(export (intern xml-query-data-model::s xml-query-data-model::p)
xml-query-data-model::p)))
|
9db0b9e3786be24703e4fda87f9c9aad5fa78bde34e53c58d1ec378b9c37993a | lpgauth/fast_disk_log | fast_disk_log_writer.erl | -module(fast_disk_log_writer).
-include("fast_disk_log.hrl").
-export([
init/5,
start_link/4
]).
%% Internal state of a writer process.
-record(state, {
fd, % file handle returned by file:open/2 ([append, raw])
logger, % logger name, passed to fast_disk_log:close/1 on auto close
name, % registered name of this writer process
timer_delay, % auto-close delay in milliseconds (undefined if disabled)
timer_ref, % reference of the pending auto_close timer, if any
write_count = 0 % number of writes since the last auto_close tick
}).
%% public
%% @doc Writer process entry point: opens Filename for appending, registers
%% the process under Name, acks the parent and enters the receive loop.
%% When auto_close is enabled, an auto_close timer is armed with twice the
%% configured max flush delay. On open failure the parent is now acked with
%% the error so that start_link/4 does not block until proc_lib's timeout.
-spec init(pid(), atom(), name(), filename(), open_options()) -> ok | no_return().
init(Parent, Name, Logger, Filename, Opts) ->
    case file:open(Filename, [append, raw]) of
        {ok, Fd} ->
            register(Name, self()),
            proc_lib:init_ack(Parent, {ok, self()}),
            State = #state {
                name = Name,
                fd = Fd,
                logger = Logger
            },
            case ?LOOKUP(auto_close, Opts, ?DEFAULT_AUTO_CLOSE) of
                true ->
                    %% close automatically after two flush intervals without writes
                    AutoCloseDelay = ?ENV(max_delay, ?DEFAULT_MAX_DELAY) * 2,
                    loop(State#state {
                        timer_delay = AutoCloseDelay,
                        timer_ref = new_timer(AutoCloseDelay, auto_close)
                    });
                false ->
                    loop(State)
            end;
        {error, Reason} ->
            %% ack the failure so the caller of start_link/4 gets
            %% {error, Reason} promptly instead of hanging
            proc_lib:init_ack(Parent, {error, Reason}),
            ?ERROR_MSG("failed to open file: ~p ~p~n", [Reason, Filename]),
            ok
    end.
%% @doc Spawns a writer via proc_lib so init/5 can ack this caller once the
%% log file is open and the process is registered under Name.
-spec start_link(atom(), name(), filename(), open_options()) -> {ok, pid()}.
start_link(Name, Logger, Filename, Opts) ->
proc_lib:start_link(?MODULE, init, [self(), Name, Logger, Filename, Opts]).
%% private
%% Drain up to Remaining pending {write, Buffer} messages from the mailbox,
%% returning the buffers in arrival order. Gives up early and returns what
%% was collected so far once ?CLOSE_TIMEOUT elapses without a message.
close_wait(0) ->
    [];
close_wait(Remaining) ->
    receive
        {write, Buffer} ->
            [Buffer | close_wait(Remaining - 1)]
    after ?CLOSE_TIMEOUT ->
        []
    end.
%% auto_close tick with no writes since the last tick: ask the public API
%% to close this log (spawned so this writer can keep serving its mailbox
%% while the close request is routed back to it)
handle_msg(auto_close, #state {
write_count = 0,
logger = Logger
} = State) ->
spawn(fun () -> fast_disk_log:close(Logger) end),
{ok, State};
%% auto_close tick but writes happened: rearm the timer, reset the counter
handle_msg(auto_close, #state {timer_delay = TimerDelay} = State) ->
{ok, State#state {
timer_ref = new_timer(TimerDelay, auto_close),
write_count = 0
}};
%% close: collect up to PoolSize pending write buffers, flush, sync and
%% close the file, notify Pid, then remove this child from the supervisor.
%% NOTE(review): close_wait/1 already yields buffers in arrival order, so
%% lists:reverse/1 emits them newest-first here -- confirm this is intended.
handle_msg({close, PoolSize, Pid}, #state {
fd = Fd,
name = Name
}) ->
Buffer = lists:reverse(close_wait(PoolSize)),
case file:write(Fd, Buffer) of
ok -> ok;
{error, Reason} ->
?ERROR_MSG("failed to write: ~p~n", [Reason])
end,
case file:sync(Fd) of
ok -> ok;
{error, Reason2} ->
?ERROR_MSG("failed to sync: ~p~n", [Reason2])
end,
case file:close(Fd) of
ok -> ok;
{error, Reason3} ->
?ERROR_MSG("failed to close: ~p~n", [Reason3])
end,
Pid ! {fast_disk_log, {closed, Name}},
ok = supervisor:terminate_child(?SUPERVISOR, Name);
%% write: append Buffer to the file; a failure is logged and the state is
%% returned unchanged (write_count is only bumped on success)
handle_msg({write, Buffer}, #state {
fd = Fd,
write_count = WriteCount
} = State) ->
case file:write(Fd, Buffer) of
ok ->
{ok, State#state {
write_count = WriteCount + 1
}};
{error, Reason} ->
?ERROR_MSG("failed to write: ~p~n", [Reason]),
{ok, State}
end.
%% Main receive loop: each incoming message is delegated to handle_msg/2
%% and the (possibly updated) state is carried into the next iteration.
loop(State) ->
    receive
        Msg ->
            {ok, NewState} = handle_msg(Msg, State),
            loop(NewState)
    end.
%% Schedule Msg to be delivered to this process after Delay milliseconds;
%% returns the timer reference from erlang:send_after/3.
new_timer(Delay, Msg) ->
erlang:send_after(Delay, self(), Msg).
| null | https://raw.githubusercontent.com/lpgauth/fast_disk_log/ce781aafadba416e187108aaa51d1bc0024dbf53/src/fast_disk_log_writer.erl | erlang | public
private | -module(fast_disk_log_writer).
-include("fast_disk_log.hrl").
-export([
init/5,
start_link/4
]).
-record(state, {
fd,
logger,
name,
timer_delay,
timer_ref,
write_count = 0
}).
-spec init(pid(), atom(), name(), filename(), open_options()) -> ok | no_return().
init(Parent, Name, Logger, Filename, Opts) ->
case file:open(Filename, [append, raw]) of
{ok, Fd} ->
register(Name, self()),
proc_lib:init_ack(Parent, {ok, self()}),
State = #state {
name = Name,
fd = Fd,
logger = Logger
},
case ?LOOKUP(auto_close, Opts, ?DEFAULT_AUTO_CLOSE) of
true ->
AutoCloseDelay = ?ENV(max_delay, ?DEFAULT_MAX_DELAY) * 2,
loop(State#state {
timer_delay = AutoCloseDelay,
timer_ref = new_timer(AutoCloseDelay, auto_close)
});
false ->
loop(State)
end;
{error, Reason} ->
?ERROR_MSG("failed to open file: ~p ~p~n", [Reason, Filename]),
ok
end.
-spec start_link(atom(), name(), filename(), open_options()) -> {ok, pid()}.
start_link(Name, Logger, Filename, Opts) ->
proc_lib:start_link(?MODULE, init, [self(), Name, Logger, Filename, Opts]).
close_wait(0) ->
[];
close_wait(N) ->
receive
{write, Buffer} ->
[Buffer | close_wait(N - 1)]
after ?CLOSE_TIMEOUT ->
[]
end.
handle_msg(auto_close, #state {
write_count = 0,
logger = Logger
} = State) ->
spawn(fun () -> fast_disk_log:close(Logger) end),
{ok, State};
handle_msg(auto_close, #state {timer_delay = TimerDelay} = State) ->
{ok, State#state {
timer_ref = new_timer(TimerDelay, auto_close),
write_count = 0
}};
handle_msg({close, PoolSize, Pid}, #state {
fd = Fd,
name = Name
}) ->
Buffer = lists:reverse(close_wait(PoolSize)),
case file:write(Fd, Buffer) of
ok -> ok;
{error, Reason} ->
?ERROR_MSG("failed to write: ~p~n", [Reason])
end,
case file:sync(Fd) of
ok -> ok;
{error, Reason2} ->
?ERROR_MSG("failed to sync: ~p~n", [Reason2])
end,
case file:close(Fd) of
ok -> ok;
{error, Reason3} ->
?ERROR_MSG("failed to close: ~p~n", [Reason3])
end,
Pid ! {fast_disk_log, {closed, Name}},
ok = supervisor:terminate_child(?SUPERVISOR, Name);
handle_msg({write, Buffer}, #state {
fd = Fd,
write_count = WriteCount
} = State) ->
case file:write(Fd, Buffer) of
ok ->
{ok, State#state {
write_count = WriteCount + 1
}};
{error, Reason} ->
?ERROR_MSG("failed to write: ~p~n", [Reason]),
{ok, State}
end.
loop(State) ->
receive Msg ->
{ok, State2} = handle_msg(Msg, State),
loop(State2)
end.
new_timer(Delay, Msg) ->
erlang:send_after(Delay, self(), Msg).
|
1fbff33136a39f843326620258e26803c343dbea581a7dac5b1705031591955f | onedata/op-worker | cdmi_test_base.erl | %%%-------------------------------------------------------------------
%%% @author Tomasz Lichon
%%% @copyright (C) 2015 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% CDMI tests
%%% @end
%%%-------------------------------------------------------------------
-module(cdmi_test_base).
-author("Tomasz Lichon").
-include("global_definitions.hrl").
-include("http/cdmi.hrl").
-include("http/rest.hrl").
-include("modules/fslogic/acl.hrl").
-include("modules/fslogic/fslogic_common.hrl").
-include("modules/fslogic/file_attr.hrl").
-include("modules/fslogic/metadata.hrl").
-include("modules/logical_file_manager/lfm.hrl").
-include("proto/common/credentials.hrl").
-include_lib("ctool/include/errors.hrl").
-include_lib("ctool/include/logging.hrl").
-include_lib("ctool/include/http/headers.hrl").
-include_lib("ctool/include/test/assertions.hrl").
-include_lib("ctool/include/test/performance.hrl").
-include_lib("ctool/include/test/test_utils.hrl").
-export([
list_dir/1,
get_file/1,
metadata/1,
delete_file/1,
delete_dir/1,
create_file/1,
update_file/1,
create_dir/1,
capabilities/1,
use_supported_cdmi_version/1,
use_unsupported_cdmi_version/1,
moved_permanently/1,
objectid/1,
request_format_check/1,
mimetype_and_encoding/1,
out_of_range/1,
partial_upload/1,
acl/1,
errors/1,
accept_header/1,
move_copy_conflict/1,
move/1,
copy/1,
create_raw_file_with_cdmi_version_header_should_succeed/1,
create_raw_dir_with_cdmi_version_header_should_succeed/1,
create_cdmi_file_without_cdmi_version_header_should_fail/1,
create_cdmi_dir_without_cdmi_version_header_should_fail/1
]).
-define(TIMEOUT, timer:seconds(5)).
%% Builds the HTTP auth header for user1 from the access token stored in
%% the test Config by the suite setup.
user_1_token_header(Config) ->
rest_test_utils:user_token_header(?config({access_token, <<"user1">>}, Config)).
-define(CDMI_VERSION_HEADER, {<<"X-CDMI-Specification-Version">>, <<"1.1.1">>}).
-define(CONTAINER_CONTENT_TYPE_HEADER, {?HDR_CONTENT_TYPE, <<"application/cdmi-container">>}).
-define(OBJECT_CONTENT_TYPE_HEADER, {?HDR_CONTENT_TYPE, <<"application/cdmi-object">>}).
-define(FILE_BEGINNING, 0).
-define(INFINITY, 9999).
%%%===================================================================
%%% Test functions
%%%===================================================================
% Tests cdmi container GET request (also referred to as LIST): basic dir
% listing, space root listing, 404 for a nonexisting dir, selective
% parameter queries and children range queries.
list_dir(Config) ->
Workers = ?config(op_worker_nodes, Config),
{SpaceName, ShortTestDirName, TestDirName, TestFileName, _FullTestFileName, _TestFileContent} =
create_test_dir_and_file(Config),
TestDirNameCheck = list_to_binary(ShortTestDirName ++ "/"),
TestFileNameBin = list_to_binary(TestFileName),
%%------ list basic dir --------
{ok, Code1, Headers1, Response1} =
do_request(Workers, TestDirName ++ "/", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code1),
?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/cdmi-container">>}, Headers1),
CdmiResponse1 = json_utils:decode(Response1),
?assertMatch(#{<<"objectType">> := <<"application/cdmi-container">>},
CdmiResponse1),
?assertMatch(#{<<"objectName">> := TestDirNameCheck}, CdmiResponse1),
?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse1),
?assertMatch(#{<<"children">> := [TestFileNameBin]}, CdmiResponse1),
?assert(maps:get(<<"metadata">>, CdmiResponse1) =/= <<>>),
%%------------------------------
%%------ list root space dir ---------
{ok, Code2, _Headers2, Response2} =
do_request(Workers, SpaceName ++ "/", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code2),
CdmiResponse2 = json_utils:decode(Response2),
SpaceDirName = list_to_binary(SpaceName ++ "/"),
?assertMatch(#{<<"objectName">> := SpaceDirName}, CdmiResponse2),
?assertMatch(#{<<"children">> := [TestDirNameCheck]}, CdmiResponse2),
%%------------------------------
%%---- list nonexisting dir -----
{ok, Code3, _Headers3, _Response3} =
do_request(Workers, "nonexisting_dir/",
get, [user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_404_NOT_FOUND, Code3),
%%------------------------------
%%-- selective params list -----
{ok, Code4, _Headers4, Response4} =
do_request(Workers, TestDirName ++ "/?children;objectName",
get, [user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code4),
CdmiResponse4 = json_utils:decode(Response4),
?assertMatch(#{<<"objectName">> := TestDirNameCheck}, CdmiResponse4),
?assertMatch(#{<<"children">> := [TestFileNameBin]}, CdmiResponse4),
?assertEqual(2, maps:size(CdmiResponse4)),
%%------------------------------
%%---- childrenrange list ------
ChildrangeDir = SpaceName ++ "/childrange/",
mkdir(Config, ChildrangeDir),
Childs = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11",
"12", "13", "14"],
ChildsBinaries = lists:map(fun(X) -> list_to_binary(X) end, Childs),
lists:map(fun(FileName) ->
create_file(Config, filename:join(ChildrangeDir, FileName))
end, Childs),
{ok, Code5, _Headers5, Response5} =
do_request(Workers, ChildrangeDir ++ "?children;childrenrange",
get, [user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code5),
CdmiResponse5 = (json_utils:decode(Response5)),
ChildrenResponse1 = maps:get(<<"children">>, CdmiResponse5),
?assert(is_list(ChildrenResponse1)),
lists:foreach(fun(Name) ->
?assert(lists:member(Name, ChildrenResponse1))
end, ChildsBinaries),
?assertMatch(#{<<"childrenrange">> := <<"0-14">>}, CdmiResponse5),
%% sub-ranges of the children list, requested via children:M-N
{ok, Code6, _, Response6} =
do_request(Workers, ChildrangeDir ++ "?children:2-13;childrenrange", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
{ok, Code7, _, Response7} =
do_request(Workers, ChildrangeDir ++ "?children:0-1;childrenrange", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
{ok, Code8, _, Response8} =
do_request(Workers, ChildrangeDir ++ "?children:14-14;childrenrange", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code6),
?assertEqual(?HTTP_200_OK, Code7),
?assertEqual(?HTTP_200_OK, Code8),
CdmiResponse6 = json_utils:decode(Response6),
CdmiResponse7 = json_utils:decode(Response7),
CdmiResponse8 = json_utils:decode(Response8),
ChildrenResponse6 = maps:get(<<"children">>, CdmiResponse6),
ChildrenResponse7 = maps:get(<<"children">>, CdmiResponse7),
ChildrenResponse8 = maps:get(<<"children">>, CdmiResponse8),
?assert(is_list(ChildrenResponse6)),
?assert(is_list(ChildrenResponse7)),
?assert(is_list(ChildrenResponse8)),
?assertEqual(12, length(ChildrenResponse6)),
?assertEqual(2, length(ChildrenResponse7)),
?assertEqual(1, length(ChildrenResponse8)),
?assertMatch(#{<<"childrenrange">> := <<"2-13">>}, CdmiResponse6),
?assertMatch(#{<<"childrenrange">> := <<"0-1">>}, CdmiResponse7),
?assertMatch(#{<<"childrenrange">> := <<"14-14">>}, CdmiResponse8),
%% the three sub-ranges together must cover every child exactly
lists:foreach(
fun(Name) ->
?assert(lists:member(Name,
ChildrenResponse6 ++ ChildrenResponse7 ++ ChildrenResponse8))
end, ChildsBinaries).
%%------------------------------
%% Tests cdmi object GET request. Request can be done without cdmi header
%% (in that case file content is returned as response body), or with cdmi
%% header (the response contains json string of type: application/cdmi-object,
%% and we can specify what parameters we need by listing them as ';'
%% separated list after '?' in URL)
get_file(Config) ->
[{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
EmptyFileName = filename:join([binary_to_list(SpaceName), "empty.txt"]),
FilledFileName = filename:join([binary_to_list(SpaceName), "toRead.txt"]),
FileContent = <<"Some content...">>,
Workers = ?config(op_worker_nodes, Config),
{ok, _} = create_file(Config, EmptyFileName),
{ok, _} = create_file(Config, FilledFileName),
?assert(object_exists(Config, FilledFileName)),
{ok, _} = write_to_file(Config, FilledFileName, FileContent, ?FILE_BEGINNING),
?assertEqual(FileContent, get_file_content(Config, FilledFileName)),
%%-------- basic read ----------
RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code1, _Headers1, Response1} = do_request(Workers, FilledFileName, get, RequestHeaders1, []),
?assertEqual(?HTTP_200_OK, Code1),
CdmiResponse1 = json_utils:decode(Response1),
FileContent1 = base64:encode(FileContent),
SpaceName1 = <<"/", SpaceName/binary, "/">>,
?assertMatch(#{<<"objectType">> := <<"application/cdmi-object">>}, CdmiResponse1),
?assertMatch(#{<<"objectName">> := <<"toRead.txt">>}, CdmiResponse1),
?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse1),
?assertMatch(#{<<"valuetransferencoding">> := <<"base64">>}, CdmiResponse1),
?assertMatch(#{<<"mimetype">> := <<"application/octet-stream">>}, CdmiResponse1),
?assertMatch(#{<<"parentURI">> := SpaceName1}, CdmiResponse1),
?assertMatch(#{<<"value">> := FileContent1}, CdmiResponse1),
?assertMatch(#{<<"valuerange">> := <<"0-14">>}, CdmiResponse1),
?assert(maps:get(<<"metadata">>, CdmiResponse1) =/= <<>>),
%%------------------------------
%%-- selective params read -----
RequestHeaders2 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code2, _Headers2, Response2} = do_request(Workers, FilledFileName ++ "?parentURI;completionStatus", get, RequestHeaders2, []),
?assertEqual(?HTTP_200_OK, Code2),
CdmiResponse2 = json_utils:decode(Response2),
?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse2),
?assertMatch(#{<<"parentURI">> := SpaceName1}, CdmiResponse2),
?assertEqual(2, maps:size(CdmiResponse2)),
%%------------------------------
%%--- selective value read -----
RequestHeaders3 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code3, _Headers3, Response3} = do_request(Workers, FilledFileName ++ "?value:1-3;valuerange", get, RequestHeaders3, []),
?assertEqual(?HTTP_200_OK, Code3),
CdmiResponse3 = json_utils:decode(Response3),
?assertMatch(#{<<"valuerange">> := <<"1-3">>}, CdmiResponse3),
%% 1-3 from FileContent = <<"Some content...">>
%%------------------------------
%%------- noncdmi read --------
{ok, Code4, Headers4, Response4} =
do_request(Workers, FilledFileName, get, [user_1_token_header(Config)]),
?assertEqual(?HTTP_200_OK, Code4),
?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/octet-stream">>}, Headers4),
?assertEqual(FileContent, Response4),
%%------------------------------
%%------- objectid read --------
RequestHeaders5 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code5, _Headers5, Response5} = do_request(Workers, FilledFileName ++ "?objectID", get, RequestHeaders5, []),
?assertEqual(?HTTP_200_OK, Code5),
CdmiResponse5 = (json_utils:decode(Response5)),
ObjectID = maps:get(<<"objectID">>, CdmiResponse5),
?assert(is_binary(ObjectID)),
%%------------------------------
%%-------- read by id ----------
RequestHeaders6 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code6, _Headers6, Response6} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(ObjectID), get, RequestHeaders6, []),
?assertEqual(?HTTP_200_OK, Code6),
CdmiResponse6 = (json_utils:decode(Response6)),
?assertEqual(FileContent, base64:decode(maps:get(<<"value">>, CdmiResponse6))),
%%------------------------------
%% selective value single range read non-cdmi
?assertMatch(
{ok, ?HTTP_206_PARTIAL_CONTENT, #{?HDR_CONTENT_RANGE := <<"bytes 5-8/15">>}, <<"cont">>},
do_request(Workers, FilledFileName, get, [
{?HDR_RANGE, <<"bytes=5-8">>}, user_1_token_header(Config)
])
),
%%------------------------------
%% selective value multi range read non-cdmi
{ok, _, #{
?HDR_CONTENT_TYPE := <<"multipart/byteranges; boundary=", Boundary/binary>>
}, Response8} = ?assertMatch(
{ok, ?HTTP_206_PARTIAL_CONTENT, #{?HDR_CONTENT_TYPE := <<"multipart/byteranges", _/binary>>}, _},
do_request(Workers, FilledFileName, get, [
{?HDR_RANGE, <<"bytes=1-3,5-5,-3">>}, user_1_token_header(Config)
])
),
%% expected multipart body, one part per requested range
ExpResponse8 = <<
"--", Boundary/binary,
"\r\ncontent-type: application/octet-stream\r\ncontent-range: bytes 1-3/15",
"\r\n\r\nome",
"--", Boundary/binary,
"\r\ncontent-type: application/octet-stream\r\ncontent-range: bytes 5-5/15",
"\r\n\r\nc",
"--", Boundary/binary,
"\r\ncontent-type: application/octet-stream\r\ncontent-range: bytes 12-14/15",
"\r\n\r\n...\r\n",
"--", Boundary/binary, "--"
>>,
?assertEqual(ExpResponse8, Response8),
%%------------------------------
%% read file non-cdmi with invalid Range should fail
lists:foreach(fun(InvalidRange) ->
?assertMatch(
{ok, ?HTTP_416_RANGE_NOT_SATISFIABLE, #{?HDR_CONTENT_RANGE := <<"bytes */15">>}, <<>>},
do_request(Workers, FilledFileName, get, [
{?HDR_RANGE, InvalidRange}, user_1_token_header(Config)
])
)
end, [
<<"unicorns">>,
<<"bytes:5-10">>,
<<"bytes=5=10">>,
<<"bytes=-15-10">>,
<<"bytes=100-150">>,
<<"bytes=10-5">>,
<<"bytes=-5-">>,
<<"bytes=10--5">>,
<<"bytes=10-15-">>
]),
%%------------------------------
%% read empty file non-cdmi without Range
?assertMatch(
{ok, ?HTTP_200_OK, _, <<>>},
do_request(Workers, EmptyFileName, get, [user_1_token_header(Config)])
),
%%------------------------------
%% read empty file non-cdmi with Range should return 416
?assertMatch(
{ok, ?HTTP_416_RANGE_NOT_SATISFIABLE, #{?HDR_CONTENT_RANGE := <<"bytes */0">>}, <<>>},
do_request(Workers, EmptyFileName, get, [
{?HDR_RANGE, <<"bytes=10-15">>}, user_1_token_header(Config)
])
).
% Tests cdmi metadata read on object GET request.
metadata(Config) ->
Workers = ?config(op_worker_nodes, Config),
[{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
UserId1 = ?config({user_id, <<"user1">>}, Config),
FileName = filename:join([binary_to_list(SpaceName), "metadataTest.txt"]),
FileContent = <<"Some content...">>,
DirName = filename:join([binary_to_list(SpaceName), "metadataTestDir"]) ++ "/",
%%-------- create file with user metadata --------
?assert(not object_exists(Config, FileName)),
RequestHeaders1 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
RequestBody1 = #{
<<"value">> => FileContent,
<<"valuetransferencoding">> => <<"utf-8">>,
<<"mimetype">> => <<"text/plain">>,
<<"metadata">> => #{<<"my_metadata">> => <<"my_value">>,
<<"cdmi_not_allowed">> => <<"my_value">>}},
RawRequestBody1 = json_utils:encode(RequestBody1),
Before = time:seconds_to_datetime(global_clock:timestamp_seconds()),
{ok, Code1, _Headers1, Response1} = do_request(Workers, FileName, put, RequestHeaders1, RawRequestBody1),
After = time:seconds_to_datetime(global_clock:timestamp_seconds()),
?assertEqual(?HTTP_201_CREATED, Code1),
CdmiResponse1 = (json_utils:decode(Response1)),
Metadata = maps:get(<<"metadata">>, CdmiResponse1),
Metadata1 = Metadata,
?assertMatch(#{<<"cdmi_size">> := <<"15">>}, Metadata1),
CTime1 = time:iso8601_to_datetime(maps:get(<<"cdmi_ctime">>, Metadata1)),
ATime1 = time:iso8601_to_datetime(maps:get(<<"cdmi_atime">>, Metadata1)),
MTime1 = time:iso8601_to_datetime(maps:get(<<"cdmi_mtime">>, Metadata1)),
?assert(Before =< ATime1),
?assert(Before =< MTime1),
?assert(Before =< CTime1),
?assert(ATime1 =< After),
?assert(MTime1 =< After),
?assert(CTime1 =< After),
?assertMatch(UserId1, maps:get(<<"cdmi_owner">>, Metadata1)),
?assertMatch(#{<<"my_metadata">> := <<"my_value">>}, Metadata1),
?assertEqual(6, maps:size(Metadata1)),
%%-- selective metadata read -----
{ok, ?HTTP_200_OK, _Headers2, Response2} = do_request(Workers, FileName ++ "?metadata", get, RequestHeaders1, []),
CdmiResponse2 = (json_utils:decode(Response2)),
?assertEqual(1, maps:size(CdmiResponse2)),
Metadata2 = maps:get(<<"metadata">>, CdmiResponse2),
?assertEqual(6, maps:size(Metadata2)),
%%-- selective metadata read with prefix -----
{ok, ?HTTP_200_OK, _Headers3, Response3} = do_request(Workers, FileName ++ "?metadata:cdmi_", get, RequestHeaders1, []),
CdmiResponse3 = (json_utils:decode(Response3)),
?assertEqual(1, maps:size(CdmiResponse3)),
Metadata3 = maps:get(<<"metadata">>, CdmiResponse3),
?assertEqual(5, maps:size(Metadata3)),
{ok, ?HTTP_200_OK, _Headers4, Response4} = do_request(Workers, FileName ++ "?metadata:cdmi_o", get, RequestHeaders1, []),
CdmiResponse4 = json_utils:decode(Response4),
?assertEqual(1, maps:size(CdmiResponse4)),
Metadata4 = maps:get(<<"metadata">>, CdmiResponse4),
?assertMatch(UserId1, maps:get(<<"cdmi_owner">>, Metadata4)),
?assertEqual(1, maps:size(Metadata4)),
{ok, ?HTTP_200_OK, _Headers5, Response5} = do_request(Workers, FileName ++ "?metadata:cdmi_size", get, RequestHeaders1, []),
CdmiResponse5 = json_utils:decode(Response5),
?assertEqual(1, maps:size(CdmiResponse5)),
Metadata5 = maps:get(<<"metadata">>, CdmiResponse5),
?assertMatch(#{<<"cdmi_size">> := <<"15">>}, Metadata5),
?assertEqual(1, maps:size(Metadata5)),
{ok, ?HTTP_200_OK, _Headers6, Response6} = do_request(Workers, FileName ++ "?metadata:cdmi_no_such_metadata", get, RequestHeaders1, []),
CdmiResponse6 = json_utils:decode(Response6),
?assertEqual(1, maps:size(CdmiResponse6)),
?assertMatch(#{<<"metadata">> := #{}}, CdmiResponse6),
%%------ update user metadata of a file ----------
RequestBody7 = #{<<"metadata">> => #{<<"my_new_metadata">> => <<"my_new_value">>}},
RawRequestBody7 = json_utils:encode(RequestBody7),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, FileName, put, RequestHeaders1, RawRequestBody7),
{ok, ?HTTP_200_OK, _Headers7, Response7} = do_request(Workers, FileName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse7 = (json_utils:decode(Response7)),
?assertEqual(1, maps:size(CdmiResponse7)),
Metadata7 = maps:get(<<"metadata">>, CdmiResponse7),
?assertMatch(#{<<"my_new_metadata">> := <<"my_new_value">>}, Metadata7),
?assertEqual(1, maps:size(Metadata7)),
RequestBody8 = #{<<"metadata">> =>
#{<<"my_new_metadata_add">> => <<"my_new_value_add">>,
<<"my_new_metadata">> => <<"my_new_value_update">>,
<<"cdmi_not_allowed">> => <<"my_value">>}},
RawRequestBody8 = json_utils:encode(RequestBody8),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, FileName ++ "?metadata:my_new_metadata_add;metadata:my_new_metadata;metadata:cdmi_not_allowed",
put, RequestHeaders1, RawRequestBody8),
{ok, ?HTTP_200_OK, _Headers8, Response8} = do_request(Workers, FileName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse8 = (json_utils:decode(Response8)),
?assertEqual(1, maps:size(CdmiResponse8)),
Metadata8 = maps:get(<<"metadata">>, CdmiResponse8),
?assertMatch(#{<<"my_new_metadata_add">> := <<"my_new_value_add">>}, Metadata8),
?assertMatch(#{<<"my_new_metadata">> := <<"my_new_value_update">>}, Metadata8),
?assertEqual(2, maps:size(Metadata8)),
{ok, ?HTTP_200_OK, _Headers9, Response9} = do_request(Workers, FileName ++ "?metadata:cdmi_", get, RequestHeaders1, []),
CdmiResponse9 = (json_utils:decode(Response9)),
?assertEqual(1, maps:size(CdmiResponse9)),
Metadata9 = maps:get(<<"metadata">>, CdmiResponse9),
?assertEqual(5, maps:size(Metadata9)),
RequestBody10 = #{<<"metadata">> => #{<<"my_new_metadata">> => <<"my_new_value_ignore">>}},
RawRequestBody10 = json_utils:encode(RequestBody10),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, FileName ++ "?metadata:my_new_metadata_add", put, RequestHeaders1,
RawRequestBody10),
{ok, ?HTTP_200_OK, _Headers10, Response10} = do_request(Workers, FileName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse10 = (json_utils:decode(Response10)),
?assertEqual(1, maps:size(CdmiResponse10)),
Metadata10 = maps:get(<<"metadata">>, CdmiResponse10),
?assertMatch(#{<<"my_new_metadata">> := <<"my_new_value_update">>}, Metadata10),
?assertEqual(1, maps:size(Metadata10)),
%%------ create directory with user metadata ----------
RequestHeaders2 = [?CONTAINER_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
RequestBody11 = #{<<"metadata">> => #{<<"my_metadata">> => <<"my_dir_value">>}},
RawRequestBody11 = json_utils:encode(RequestBody11),
{ok, ?HTTP_201_CREATED, _Headers11, Response11} = do_request(Workers, DirName, put, RequestHeaders2, RawRequestBody11),
CdmiResponse11 = (json_utils:decode(Response11)),
Metadata11 = maps:get(<<"metadata">>, CdmiResponse11),
?assertMatch(#{<<"my_metadata">> := <<"my_dir_value">>}, Metadata11),
%%------ update user metadata of a directory ----------
RequestBody12 = #{<<"metadata">> => #{<<"my_metadata">> => <<"my_dir_value_update">>}},
RawRequestBody12 = json_utils:encode(RequestBody12),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, DirName, put, RequestHeaders2, RawRequestBody12),
{ok, ?HTTP_200_OK, _Headers13, Response13} = do_request(Workers, DirName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse13 = (json_utils:decode(Response13)),
?assertEqual(1, maps:size(CdmiResponse13)),
Metadata13 = maps:get(<<"metadata">>, CdmiResponse13),
?assertMatch(#{<<"my_metadata">> := <<"my_dir_value_update">>}, Metadata13),
?assertEqual(1, maps:size(Metadata13)),
%%------------------------------
%%------ write acl metadata ----------
UserId1 = ?config({user_id, <<"user1">>}, Config),
UserName1 = ?config({user_name, <<"user1">>}, Config),
FileName2 = filename:join([binary_to_list(SpaceName), "acl_test_file.txt"]),
Ace1 = ace:to_json(#access_control_entity{
acetype = ?allow_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?read_all_object_mask
}, cdmi),
Ace2 = ace:to_json(#access_control_entity{
acetype = ?allow_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?write_all_object_mask
}, cdmi),
Ace2Full = #{
<<"acetype">> => ?allow,
<<"identifier">> => <<UserName1/binary, "#", UserId1/binary>>,
<<"aceflags">> => ?no_flags,
<<"acemask">> => <<
?write_object/binary, ",",
?write_metadata/binary, ",",
?write_attributes/binary, ",",
?delete/binary, ",",
?write_acl/binary
>>
},
create_file(Config, FileName2),
write_to_file(Config, FileName2, <<"data">>, 0),
RequestBody15 = #{<<"metadata">> => #{<<"cdmi_acl">> => [Ace1, Ace2Full]}},
RawRequestBody15 = json_utils:encode(RequestBody15),
RequestHeaders15 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code15, _Headers15, Response15} = do_request(Workers, FileName2 ++ "?metadata:cdmi_acl", put, RequestHeaders15, RawRequestBody15),
?assertMatch({?HTTP_204_NO_CONTENT, _}, {Code15, Response15}),
{ok, Code16, _Headers16, Response16} = do_request(Workers, FileName2 ++ "?metadata", get, RequestHeaders1, []),
?assertEqual(?HTTP_200_OK, Code16),
CdmiResponse16 = (json_utils:decode(Response16)),
?assertEqual(1, maps:size(CdmiResponse16)),
Metadata16 = maps:get(<<"metadata">>, CdmiResponse16),
?assertEqual(6, maps:size(Metadata16)),
?assertMatch(#{<<"cdmi_acl">> := [Ace1, Ace2]}, Metadata16),
{ok, Code17, _Headers17, Response17} = do_request(Workers, FileName2, get, [user_1_token_header(Config)], []),
?assertEqual(?HTTP_200_OK, Code17),
?assertEqual(<<"data">>, Response17),
%%------------------------------
%%-- create forbidden by acl ---
Ace3 = ace:to_json(#access_control_entity{
acetype = ?allow_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?write_metadata_mask
}, cdmi),
Ace4 = ace:to_json(#access_control_entity{
acetype = ?deny_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?write_object_mask
}, cdmi),
RequestBody18 = #{<<"metadata">> => #{<<"cdmi_acl">> => [Ace3, Ace4]}},
RawRequestBody18 = json_utils:encode(RequestBody18),
RequestHeaders18 = [user_1_token_header(Config), ?CONTAINER_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER],
{ok, Code18, _Headers18, _Response18} = do_request(Workers, DirName ++ "?metadata:cdmi_acl", put, RequestHeaders18, RawRequestBody18),
?assertEqual(?HTTP_204_NO_CONTENT, Code18),
{ok, Code19, _Headers19, Response19} = do_request(Workers, filename:join(DirName, "some_file"), put, [user_1_token_header(Config)], []),
ExpRestError = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),
?assertMatch(ExpRestError, {Code19, json_utils:decode(Response19)}).
%%------------------------------
% Tests cdmi object DELETE requests
delete_file(Config) ->
    % Exercises DELETE on cdmi data objects: a plain file and a group file.
    % Both deletions must answer 204 and remove the object.
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpaceDir = binary_to_list(SpaceName),
    PlainFile = filename:join([SpaceDir, "toDelete.txt"]),
    GroupFile = filename:join([SpaceDir, "groupFile"]),

    %%----- basic delete -----------
    {ok, _} = create_file(Config, "/" ++ PlainFile),
    ?assert(object_exists(Config, PlainFile)),
    {ok, PlainCode, _, _} = do_request(
        Workers, PlainFile, delete, [user_1_token_header(Config), ?CDMI_VERSION_HEADER]),
    ?assertEqual(?HTTP_204_NO_CONTENT, PlainCode),
    ?assert(not object_exists(Config, PlainFile)),
    %%------------------------------

    %%----- delete group file ------
    {ok, _} = create_file(Config, GroupFile),
    {ok, GroupCode, _, _} = do_request(
        Workers, GroupFile, delete, [user_1_token_header(Config), ?CDMI_VERSION_HEADER]),
    ?assertEqual(?HTTP_204_NO_CONTENT, GroupCode),
    ?assert(not object_exists(Config, GroupFile)).
%%------------------------------
% Tests cdmi container DELETE requests
delete_dir(Config) ->
    % Exercises DELETE on cdmi containers: an empty dir, a non-empty dir
    % (recursive delete of its child) and the space root, which must be
    % refused with EPERM and left intact.
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    DirName = filename:join([binary_to_list(SpaceName), "toDelete"]) ++ "/",
    ChildDirName = filename:join([binary_to_list(SpaceName), "toDelete", "child"]) ++ "/",

    %%----- basic delete -----------
    mkdir(Config, DirName),
    ?assert(object_exists(Config, DirName)),
    RequestHeaders1 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code1, _Headers1, _Response1} =
        do_request(Workers, DirName, delete, RequestHeaders1, []),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code1),
    ?assert(not object_exists(Config, DirName)),
    %%------------------------------

    %%------ recursive delete ------
    mkdir(Config, DirName),
    ?assert(object_exists(Config, DirName)),
    mkdir(Config, ChildDirName),
    ?assert(object_exists(Config, DirName)),
    RequestHeaders2 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code2, _Headers2, _Response2} =
        do_request(Workers, DirName, delete, RequestHeaders2, []),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),
    % deleting the parent must also remove the child
    ?assert(not object_exists(Config, DirName)),
    ?assert(not object_exists(Config, ChildDirName)),
    %%------------------------------

    %%----- delete root dir -------
    % (comment marker restored; the bare line was a syntax error)
    ?assert(object_exists(Config, "/")),
    RequestHeaders3 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    ?assert(object_exists(Config, "/")),
    {ok, Code3, _Headers3, Response3} =
        do_request(Workers, "/", delete, RequestHeaders3, []),
    % removing the root is forbidden
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_POSIX(?EPERM)),
    ?assertMatch(ExpRestError, {Code3, json_utils:decode(Response3)}),
    ?assert(object_exists(Config, "/")).
%%------------------------------
% Tests file creation (cdmi object PUT). It can be done with cdmi header (when file data is provided as cdmi-object
% json string), or without (when we treat request body as new file content)
create_file(Config) ->
    % Creates cdmi data objects four ways: plain cdmi body, base64-encoded
    % cdmi body, empty cdmi body, and a non-cdmi (raw binary) PUT. Each must
    % answer 201 and leave the expected content on disk.
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpaceDir = binary_to_list(SpaceName),
    PlainPath = filename:join([SpaceDir, "file1.txt"]),
    Base64Path = filename:join([SpaceDir, "file2.txt"]),
    NonCdmiPath = filename:join([SpaceDir, "file3.txt"]),
    EmptyPath = filename:join([SpaceDir, "file4.txt"]),
    FileContent = <<"File content!">>,
    CdmiHeaders = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    ExpParentURI = <<"/", SpaceName/binary, "/">>,

    %%-------- basic create --------
    ?assert(not object_exists(Config, PlainPath)),
    PlainBody = json_utils:encode(#{<<"value">> => FileContent}),
    {ok, PlainCode, _, PlainResponse} =
        do_request(Workers, PlainPath, put, CdmiHeaders, PlainBody),
    ?assertEqual(?HTTP_201_CREATED, PlainCode),
    PlainCdmi = json_utils:decode(PlainResponse),
    ?assertMatch(#{<<"objectType">> := <<"application/cdmi-object">>}, PlainCdmi),
    ?assertMatch(#{<<"objectName">> := <<"file1.txt">>}, PlainCdmi),
    ?assertMatch(#{<<"parentURI">> := ExpParentURI}, PlainCdmi),
    ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, PlainCdmi),
    ?assertNotEqual([], maps:get(<<"metadata">>, PlainCdmi)),
    ?assert(object_exists(Config, PlainPath)),
    ?assertEqual(FileContent, get_file_content(Config, PlainPath)),
    %%------------------------------

    %%------ base64 create ---------
    ?assert(not object_exists(Config, Base64Path)),
    Base64Body = json_utils:encode(#{
        <<"valuetransferencoding">> => <<"base64">>,
        <<"value">> => base64:encode(FileContent)
    }),
    {ok, Base64Code, _, Base64Response} =
        do_request(Workers, Base64Path, put, CdmiHeaders, Base64Body),
    ?assertEqual(?HTTP_201_CREATED, Base64Code),
    Base64Cdmi = json_utils:decode(Base64Response),
    ?assertMatch(#{<<"objectType">> := <<"application/cdmi-object">>}, Base64Cdmi),
    ?assertMatch(#{<<"objectName">> := <<"file2.txt">>}, Base64Cdmi),
    ?assertMatch(#{<<"parentURI">> := ExpParentURI}, Base64Cdmi),
    ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, Base64Cdmi),
    ?assert(maps:get(<<"metadata">>, Base64Cdmi) =/= <<>>),
    ?assert(object_exists(Config, Base64Path)),
    % the stored content is the decoded payload
    ?assertEqual(FileContent, get_file_content(Config, Base64Path)),
    %%------------------------------

    %%------- create empty ---------
    ?assert(not object_exists(Config, EmptyPath)),
    {ok, EmptyCode, _, _} = do_request(Workers, EmptyPath, put, CdmiHeaders, []),
    ?assertEqual(?HTTP_201_CREATED, EmptyCode),
    ?assert(object_exists(Config, EmptyPath)),
    ?assertEqual(<<>>, get_file_content(Config, EmptyPath)),
    %%------------------------------

    %%------ create noncdmi --------
    ?assert(not object_exists(Config, NonCdmiPath)),
    NonCdmiHeaders = [user_1_token_header(Config), {?HDR_CONTENT_TYPE, <<"application/binary">>}],
    {ok, NonCdmiCode, _, _} =
        do_request(Workers, NonCdmiPath, put, NonCdmiHeaders, FileContent),
    ?assertEqual(?HTTP_201_CREATED, NonCdmiCode),
    ?assert(object_exists(Config, NonCdmiPath)),
    ?assertEqual(FileContent, get_file_content(Config, NonCdmiPath)).
%%------------------------------
% Tests cdmi object PUT requests (updating content)
update_file(Config) ->
    % Updates a file's content several ways: full replace and byte-range
    % update via cdmi requests, then full replace and Content-Range updates
    % via plain HTTP, ending with a malformed multi-range header that must
    % be rejected without touching the file.
    Workers = ?config(op_worker_nodes, Config),
    {_SpaceName, _ShortTestDirName, _TestDirName, _TestFileName, FullTestFileName, TestFileContent} =
        create_test_dir_and_file(Config),
    NewValue = <<"New Value!">>,
    UpdatedValue = <<"123 Value!">>,

    %%--- value replace, cdmi ------
    % (comment marker restored; the bare line was a syntax error)
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(TestFileContent, get_file_content(Config, FullTestFileName)),
    RequestHeaders1 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody1 = #{<<"value">> => NewValue},
    RawRequestBody1 = json_utils:encode(RequestBody1),
    {ok, Code1, _Headers1, _Response1} = do_request(Workers, FullTestFileName, put, RequestHeaders1, RawRequestBody1),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code1),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(NewValue, get_file_content(Config, FullTestFileName)),
    %%------------------------------

    %%---- value update, cdmi ------
    UpdateValue = <<"123">>,
    RequestHeaders2 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody2 = #{<<"value">> => base64:encode(UpdateValue)},
    RawRequestBody2 = json_utils:encode(RequestBody2),
    % write bytes 0-2, turning "New Value!" into "123 Value!"
    {ok, Code2, _Headers2, _Response2} = do_request(Workers, FullTestFileName ++ "?value:0-2", put, RequestHeaders2, RawRequestBody2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(UpdatedValue, get_file_content(Config, FullTestFileName)),
    %%------------------------------

    %%--- value replace, http ------
    RequestBody3 = TestFileContent,
    {ok, Code3, _Headers3, _Response3} =
        do_request(Workers, FullTestFileName, put, [user_1_token_header(Config)], RequestBody3),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code3),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(TestFileContent,
        get_file_content(Config, FullTestFileName)),
    %%------------------------------

    %%---- value update, http ------
    UpdateValue = <<"123">>,
    RequestHeaders4 = [{?HDR_CONTENT_RANGE, <<"bytes 0-2/3">>}],
    {ok, Code4, _Headers4, _Response4} =
        do_request(Workers, FullTestFileName,
            put, [user_1_token_header(Config) | RequestHeaders4], UpdateValue),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code4),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(<<"123t_file_content">>,
        get_file_content(Config, FullTestFileName)),
    %%------------------------------

    %%---- value update2, http -----
    UpdateValue2 = <<"00">>,
    RequestHeaders5 = [{?HDR_CONTENT_RANGE, <<"bytes 3-4/*">>}],
    {ok, Code5, _Headers5, _Response5} =
        do_request(Workers, FullTestFileName,
            put, [user_1_token_header(Config) | RequestHeaders5], UpdateValue2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code5),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(<<"12300file_content">>,
        get_file_content(Config, FullTestFileName)),
    %%------------------------------

    %%---- value update, http error ------
    UpdateValue = <<"123">>,
    RequestHeaders6 = [{?HDR_CONTENT_RANGE, <<"bytes 0-2,3-4/*">>}],
    {ok, Code6, _Headers6, Response6} =
        do_request(Workers, FullTestFileName, put, [user_1_token_header(Config) | RequestHeaders6],
            UpdateValue),
    % multiple ranges in a single Content-Range header are rejected and the
    % previous content must be untouched
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_BAD_DATA(?HDR_CONTENT_RANGE)),
    ?assertMatch(ExpRestError, {Code6, json_utils:decode(Response6)}),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(<<"12300file_content">>,
        get_file_content(Config, FullTestFileName)).
%%------------------------------
use_supported_cdmi_version(Config) ->
    % A request carrying a supported X-CDMI-Specification-Version header is
    % processed normally; the unknown path simply yields 404.
    Workers = ?config(op_worker_nodes, Config),
    Headers = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code, _, _} = do_request(Workers, "/random", get, Headers),
    ?assertEqual(Code, ?HTTP_404_NOT_FOUND).
use_unsupported_cdmi_version(Config) ->
    % An unsupported version header must be rejected with a bad-version
    % error listing the versions the server does support.
    Workers = ?config(op_worker_nodes, Config),
    Headers = [{<<"X-CDMI-Specification-Version">>, <<"1.0.2">>}],
    {ok, Code, _, Response} = do_request(Workers, "/random", get, Headers),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_BAD_VERSION([<<"1.1.1">>, <<"1.1">>])),
    ?assertMatch(ExpRestError, {Code, json_utils:decode(Response)}).
% Tests dir creation (cdmi container PUT), remember that every container URI ends
% with '/'
create_dir(Config) ->
    % Creates cdmi containers: without cdmi headers, with them (checking the
    % returned container object), PUT to an existing dir (update, 204), and
    % under a missing parent (expected ENOENT).
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpaceDir = binary_to_list(SpaceName),
    Dir1 = filename:join([SpaceDir, "toCreate1"]) ++ "/",
    Dir2 = filename:join([SpaceDir, "toCreate2"]) ++ "/",
    MissingParent = filename:join([SpaceDir, "unknown"]) ++ "/",
    OrphanDir = filename:join(MissingParent, "dir") ++ "/",
    CdmiHeaders = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],

    %%------ non-cdmi create -------
    ?assert(not object_exists(Config, Dir1)),
    {ok, NonCdmiCode, _, _} = do_request(Workers, Dir1, put, [user_1_token_header(Config)]),
    ?assertEqual(?HTTP_201_CREATED, NonCdmiCode),
    ?assert(object_exists(Config, Dir1)),
    %%------------------------------

    %%------ basic create ----------
    ?assert(not object_exists(Config, Dir2)),
    {ok, CreateCode, _, CreateResponse} = do_request(Workers, Dir2, put, CdmiHeaders, []),
    ?assertEqual(?HTTP_201_CREATED, CreateCode),
    CreateCdmi = json_utils:decode(CreateResponse),
    ExpParentURI = <<"/", SpaceName/binary, "/">>,
    ?assertMatch(#{<<"objectType">> := <<"application/cdmi-container">>}, CreateCdmi),
    ?assertMatch(#{<<"objectName">> := <<"toCreate2/">>}, CreateCdmi),
    ?assertMatch(#{<<"parentURI">> := ExpParentURI}, CreateCdmi),
    ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CreateCdmi),
    ?assertMatch(#{<<"children">> := []}, CreateCdmi),
    ?assert(maps:get(<<"metadata">>, CreateCdmi) =/= <<>>),
    ?assert(object_exists(Config, Dir2)),
    %%------------------------------

    %%---------- update ------------
    ?assert(object_exists(Config, Dir1)),
    {ok, UpdateCode, _, _} = do_request(Workers, Dir1, put, CdmiHeaders, []),
    ?assertEqual(?HTTP_204_NO_CONTENT, UpdateCode),
    ?assert(object_exists(Config, Dir1)),
    %%------------------------------

    %%----- missing parent ---------
    ?assert(not object_exists(Config, MissingParent)),
    {ok, MissingCode, _, MissingResponse} = do_request(Workers, OrphanDir, put, CdmiHeaders, []),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_POSIX(?ENOENT)),
    ?assertMatch(ExpRestError, {MissingCode, json_utils:decode(MissingResponse)}).
%%------------------------------
% tests access to file by objectid
objectid(Config) ->
    % Verifies objectid-based addressing: the ids returned for /, a dir and
    % a file can be used via cdmi_objectid/... and yield the same responses
    % as path-based requests (ignoring atime and, where noted, parent info).
    % NOTE(review): four ?assertEqual wrappers around the *WithoutAtime
    % comparisons had been lost from this file; reconstructed below from the
    % surrounding bindings and dangling argument lists.
    Workers = ?config(op_worker_nodes, Config),
    {SpaceName, ShortTestDirName, TestDirName, TestFileName, _FullTestFileName, _TestFileContent} =
        create_test_dir_and_file(Config),
    TestDirNameCheck = list_to_binary(ShortTestDirName ++ "/"),
    ShortTestDirNameBin = list_to_binary(ShortTestDirName),
    TestFileNameBin = list_to_binary(TestFileName),

    %%-------- / objectid ----------
    RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code1, Headers1, Response1} = do_request(Workers, "", get, RequestHeaders1, []),
    ?assertEqual(?HTTP_200_OK, Code1),
    RequestHeaders0 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code0, _Headers0, Response0} = do_request(Workers, SpaceName ++ "/", get, RequestHeaders0, []),
    ?assertEqual(?HTTP_200_OK, Code0),
    CdmiResponse0 = json_utils:decode(Response0),
    SpaceRootId = maps:get(<<"objectID">>, CdmiResponse0),
    ?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/cdmi-container">>}, Headers1),
    CdmiResponse1 = json_utils:decode(Response1),
    ?assertMatch(#{<<"objectName">> := <<"/">>}, CdmiResponse1),
    RootId = maps:get(<<"objectID">>, CdmiResponse1, undefined),
    ?assertNotEqual(RootId, undefined),
    ?assert(is_binary(RootId)),
    % the root has no parent
    ?assertMatch(#{<<"parentURI">> := <<>>}, CdmiResponse1),
    ?assertEqual(error, maps:find(<<"parentID">>, CdmiResponse1)),
    ?assertMatch(#{<<"capabilitiesURI">> := <<"cdmi_capabilities/container/">>}, CdmiResponse1),
    %%------------------------------

    %%------ /dir objectid ---------
    RequestHeaders2 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code2, _Headers2, Response2} = do_request(Workers, TestDirName ++ "/", get, RequestHeaders2, []),
    ?assertEqual(?HTTP_200_OK, Code2),
    CdmiResponse2 = (json_utils:decode(Response2)),
    ?assertMatch(#{<<"objectName">> := TestDirNameCheck}, CdmiResponse2),
    DirId = maps:get(<<"objectID">>, CdmiResponse2, undefined),
    ?assertNotEqual(DirId, undefined),
    ?assert(is_binary(DirId)),
    ParentURI = <<"/", (list_to_binary(SpaceName))/binary, "/">>,
    ?assertMatch(#{<<"parentURI">> := ParentURI}, CdmiResponse2),
    ?assertMatch(#{<<"parentID">> := SpaceRootId}, CdmiResponse2),
    ?assertMatch(#{<<"capabilitiesURI">> := <<"cdmi_capabilities/container/">>}, CdmiResponse2),
    %%------------------------------

    %%--- /dir/file.txt objectid ---
    RequestHeaders3 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code3, _Headers3, Response3} = do_request(Workers, filename:join(TestDirName, TestFileName), get, RequestHeaders3, []),
    ?assertEqual(?HTTP_200_OK, Code3),
    CdmiResponse3 = json_utils:decode(Response3),
    ?assertMatch(#{<<"objectName">> := TestFileNameBin}, CdmiResponse3),
    FileId = maps:get(<<"objectID">>, CdmiResponse3, undefined),
    ?assertNotEqual(FileId, undefined),
    ?assert(is_binary(FileId)),
    ParentURI1 = <<"/", (list_to_binary(SpaceName))/binary, "/", ShortTestDirNameBin/binary, "/">>,
    ?assertMatch(#{<<"parentURI">> := ParentURI1}, CdmiResponse3),
    ?assertMatch(#{<<"parentID">> := DirId}, CdmiResponse3),
    ?assertMatch(#{<<"capabilitiesURI">> := <<"cdmi_capabilities/dataobject/">>}, CdmiResponse3),
    %%------------------------------

    %%---- get / by objectid -------
    RequestHeaders4 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code4, _Headers4, Response4} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(RootId) ++ "/", get, RequestHeaders4, []),
    ?assertEqual(?HTTP_200_OK, Code4),
    CdmiResponse4 = json_utils:decode(Response4),
    Meta1 = maps:remove(<<"cdmi_atime">>, maps:get(<<"metadata">>, CdmiResponse1)),
    CdmiResponse1WithoutAtime = maps:put(<<"metadata">>, Meta1, CdmiResponse1),
    Meta4 = maps:remove(<<"cdmi_atime">>, maps:get(<<"metadata">>, CdmiResponse4)),
    CdmiResponse4WithoutAtime = maps:put(<<"metadata">>, Meta4, CdmiResponse4),
    % should be the same as in 1 (except access time)
    ?assertEqual(CdmiResponse1WithoutAtime, CdmiResponse4WithoutAtime),
    %%------------------------------

    %%--- get /dir/ by objectid ----
    RequestHeaders5 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code5, _Headers5, Response5} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(DirId) ++ "/", get, RequestHeaders5, []),
    ?assertEqual(?HTTP_200_OK, Code5),
    CdmiResponse5 = json_utils:decode(Response5),
    Meta2 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse2))),
    CdmiResponse2WithoutAtime = maps:put(<<"metadata">>, Meta2, CdmiResponse2),
    Meta5 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse5))),
    CdmiResponse5WithoutAtime = maps:put(<<"metadata">>, Meta5, CdmiResponse5),
    % should be the same as in 2 (except parent and access time)
    ?assertEqual(
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse2WithoutAtime)),
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse5WithoutAtime))
    ),
    %%------------------------------

    %%-- get /dir/file.txt by objectid --
    RequestHeaders6 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code6, _Headers6, Response6} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(DirId) ++ "/" ++ TestFileName, get, RequestHeaders6, []),
    ?assertEqual(?HTTP_200_OK, Code6),
    CdmiResponse6 = (json_utils:decode(Response6)),
    Meta3 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse3))),
    CdmiResponse3WithoutAtime = maps:put(<<"metadata">>, Meta3, CdmiResponse3),
    Meta6 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse6))),
    CdmiResponse6WithoutAtime = maps:put(<<"metadata">>, Meta6, CdmiResponse6),
    % should be the same as in 3 (except access time)
    ?assertEqual(
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse3WithoutAtime)),
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse6WithoutAtime))
    ),
    {ok, Code7, _Headers7, Response7} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(FileId), get, RequestHeaders6, []),
    ?assertEqual(?HTTP_200_OK, Code7),
    CdmiResponse7 = (json_utils:decode(Response7)),
    Meta7 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse7))),
    CdmiResponse7WithoutAtime = maps:merge(#{<<"metadata">> => Meta7}, maps:remove(<<"metadata">>, CdmiResponse7)),
    % should be the same as in 6 (except parent and access time)
    ?assertEqual(
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse6WithoutAtime)),
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse7WithoutAtime))
    ),
    %%------------------------------

    %%---- unauthorized access to / by objectid -------
    RequestHeaders8 = [?CDMI_VERSION_HEADER],
    {ok, Code8, _, Response8} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(RootId) ++ "/", get, RequestHeaders8, []),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_UNAUTHORIZED),
    ?assertMatch(ExpRestError, {Code8, json_utils:decode(Response8)}).
%%------------------------------
% tests if capabilities of objects, containers, and whole storage system are set properly
capabilities(Config) ->
    % Checks the capability objects exposed for the whole system, for
    % containers and for dataobjects: ids, names, children and capability
    % maps, and that each is also reachable via its objectid.
    Workers = ?config(op_worker_nodes, Config),

    %%--- system capabilities ------
    RequestHeaders8 = [?CDMI_VERSION_HEADER],
    {ok, Code8, Headers8, Response8} =
        do_request(Workers, "cdmi_capabilities/", get, RequestHeaders8, []),
    ?assertEqual(?HTTP_200_OK, Code8),
    ?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/cdmi-capability">>}, Headers8),
    CdmiResponse8 = (json_utils:decode(Response8)),
    ?assertMatch(#{<<"objectID">> := ?ROOT_CAPABILITY_ID}, CdmiResponse8),
    ?assertMatch(#{<<"objectName">> := <<?ROOT_CAPABILITY_PATH>>}, CdmiResponse8),
    ?assertMatch(#{<<"childrenrange">> := <<"0-1">>}, CdmiResponse8),
    ?assertMatch(#{<<"children">> := [<<"container/">>, <<"dataobject/">>]}, CdmiResponse8),
    Capabilities = maps:get(<<"capabilities">>, CdmiResponse8),
    ?assertEqual(?ROOT_CAPABILITY_MAP, Capabilities),
    %%------------------------------

    %%-- container capabilities ----
    RequestHeaders9 = [?CDMI_VERSION_HEADER],
    {ok, Code9, _Headers9, Response9} =
        do_request(Workers, "cdmi_capabilities/container/", get, RequestHeaders9, []),
    ?assertEqual(?HTTP_200_OK, Code9),
    % the same object must be reachable via its objectid
    ?assertMatch({ok, Code9, _, Response9}, do_request(Workers, "cdmi_objectid/" ++ binary_to_list(?CONTAINER_CAPABILITY_ID) ++ "/", get, RequestHeaders9, [])),
    CdmiResponse9 = (json_utils:decode(Response9)),
    ?assertMatch(#{<<"parentURI">> := <<?ROOT_CAPABILITY_PATH>>}, CdmiResponse9),
    ?assertMatch(#{<<"parentID">> := ?ROOT_CAPABILITY_ID}, CdmiResponse9),
    ?assertMatch(#{<<"objectID">> := ?CONTAINER_CAPABILITY_ID}, CdmiResponse9),
    ?assertMatch(#{<<"objectName">> := <<"container/">>}, CdmiResponse9),
    Capabilities2 = maps:get(<<"capabilities">>, CdmiResponse9),
    ?assertEqual(?CONTAINER_CAPABILITY_MAP, Capabilities2),
    %%------------------------------

    %%-- dataobject capabilities ---
    % (comment marker restored; the bare line was a syntax error)
    RequestHeaders10 = [?CDMI_VERSION_HEADER],
    {ok, Code10, _Headers10, Response10} =
        do_request(Workers, "cdmi_capabilities/dataobject/", get, RequestHeaders10, []),
    ?assertEqual(?HTTP_200_OK, Code10),
    ?assertMatch({ok, Code10, _, Response10}, do_request(Workers, "cdmi_objectid/" ++ binary_to_list(?DATAOBJECT_CAPABILITY_ID) ++ "/", get, RequestHeaders10, [])),
    CdmiResponse10 = (json_utils:decode(Response10)),
    ?assertMatch(#{<<"parentURI">> := <<?ROOT_CAPABILITY_PATH>>}, CdmiResponse10),
    ?assertMatch(#{<<"parentID">> := ?ROOT_CAPABILITY_ID}, CdmiResponse10),
    ?assertMatch(#{<<"objectID">> := ?DATAOBJECT_CAPABILITY_ID}, CdmiResponse10),
    ?assertMatch(#{<<"objectName">> := <<"dataobject/">>}, CdmiResponse10),
    Capabilities3 = maps:get(<<"capabilities">>, CdmiResponse10),
    ?assertEqual(?DATAOBJECT_CAPABILITY_MAP, Capabilities3).
%%------------------------------
% tests if returns 'moved permanently' code when we forget about '/' in path
moved_permanently(Config) ->
    % A container path without the trailing '/' (and a file path with one)
    % is answered with 302 and a Location header pointing at the canonical
    % form, with any query string preserved.
    [WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileName = filename:join([binary_to_list(SpaceName), "somedir", "somefile.txt"]),
    DirNameWithoutSlash = filename:join([binary_to_list(SpaceName), "somedir"]),
    DirName = DirNameWithoutSlash ++ "/",
    FileNameWithSlash = FileName ++ "/",
    mkdir(Config, DirName),
    ?assert(object_exists(Config, DirName)),
    create_file(Config, FileName),
    ?assert(object_exists(Config, FileName)),
    CDMIEndpoint = cdmi_test_utils:cdmi_endpoint(WorkerP2),
    ContainerHeaders = [
        ?CONTAINER_CONTENT_TYPE_HEADER,
        ?CDMI_VERSION_HEADER,
        user_1_token_header(Config)
    ],
    ObjectHeaders = [
        ?OBJECT_CONTENT_TYPE_HEADER,
        ?CDMI_VERSION_HEADER,
        user_1_token_header(Config)
    ],

    %%--------- dir test -----------
    ExpDirLocation = list_to_binary(CDMIEndpoint ++ DirName),
    {ok, DirCode, DirHeaders, _} =
        do_request(WorkerP2, DirNameWithoutSlash, get, ContainerHeaders, []),
    ?assertEqual(?HTTP_302_FOUND, DirCode),
    ?assertMatch(#{?HDR_LOCATION := ExpDirLocation}, DirHeaders),
    %%------------------------------

    %%--------- dir test with QS-----------
    ExpQsLocation = list_to_binary(CDMIEndpoint ++ DirName ++ "?example_qs=1"),
    {ok, QsCode, QsHeaders, _} =
        do_request(WorkerP2, DirNameWithoutSlash ++ "?example_qs=1", get, ContainerHeaders, []),
    ?assertEqual(?HTTP_302_FOUND, QsCode),
    ?assertMatch(#{?HDR_LOCATION := ExpQsLocation}, QsHeaders),
    %%------------------------------

    %%--------- file test ----------
    ExpFileLocation = list_to_binary(CDMIEndpoint ++ FileName),
    {ok, FileCode, FileHeaders, _} =
        do_request(WorkerP2, FileNameWithSlash, get, ObjectHeaders, []),
    ?assertEqual(?HTTP_302_FOUND, FileCode),
    ?assertMatch(#{?HDR_LOCATION := ExpFileLocation}, FileHeaders).
%%------------------------------
% tests req format checking
request_format_check(Config) ->
    % Creating a data object or a container without the corresponding cdmi
    % content-type header must still succeed with 201.
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileToCreate = filename:join([binary_to_list(SpaceName), "file.txt"]),
    DirToCreate = filename:join([binary_to_list(SpaceName), "dir"]) ++ "/",
    FileContent = <<"File content!">>,

    %%-- obj missing content-type --
    RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody1 = #{<<"value">> => FileContent},
    RawRequestBody1 = json_utils:encode(RequestBody1),
    {ok, Code1, _Headers1, _Response1} = do_request(Workers, FileToCreate, put, RequestHeaders1, RawRequestBody1),
    ?assertEqual(?HTTP_201_CREATED, Code1),
    %%------------------------------

    %%-- dir missing content-type --
    % (comment marker restored; the bare line was a syntax error)
    RequestHeaders3 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody3 = #{<<"metadata">> => <<"">>},
    RawRequestBody3 = json_utils:encode(RequestBody3),
    {ok, Code3, _Headers3, _Response3} = do_request(Workers, DirToCreate, put, RequestHeaders3, RawRequestBody3),
    ?assertEqual(?HTTP_201_CREATED, Code3).
%%------------------------------
% tests mimetype and valuetransferencoding properties, they are part of cdmi-object and cdmi-container
% and should be changeble
mimetype_and_encoding(Config) ->
Workers = ?config(op_worker_nodes, Config),
{_SpaceName, _ShortTestDirName, TestDirName, TestFileName, _FullTestFileName, _TestFileContent} =
create_test_dir_and_file(Config),
[{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
%%-- get mimetype and valuetransferencoding of non-cdmi file --
RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code1, _Headers1, Response1} = do_request(Workers, filename:join(TestDirName, TestFileName) ++ "?mimetype;valuetransferencoding", get, RequestHeaders1, []),
?assertEqual(?HTTP_200_OK, Code1),
CdmiResponse1 = (json_utils:decode(Response1)),
?assertMatch(#{<<"mimetype">> := <<"application/octet-stream">>}, CdmiResponse1),
?assertMatch(#{<<"valuetransferencoding">> := <<"base64">>}, CdmiResponse1),
%%------------------------------
%%-- update mime and encoding --
RequestHeaders2 = [?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER, user_1_token_header(Config)],
RawBody2 = json_utils:encode(#{<<"valuetransferencoding">> => <<"utf-8">>,
<<"mimetype">> => <<"application/binary">>}),
{ok, Code2, _Headers2, _Response2} = do_request(Workers, filename:join(TestDirName, TestFileName), put, RequestHeaders2, RawBody2),
?assertEqual(?HTTP_204_NO_CONTENT, Code2),
{ok, Code3, _Headers3, Response3} = do_request(Workers, filename:join(TestDirName, TestFileName) ++ "?mimetype;valuetransferencoding", get, RequestHeaders2, []),
?assertEqual(?HTTP_200_OK, Code3),
CdmiResponse3 = (json_utils:decode(Response3)),
?assertMatch(#{<<"mimetype">> := <<"application/binary">>}, CdmiResponse3),
?assertMatch(#{<<"valuetransferencoding">> := <<"utf-8">>}, CdmiResponse3),
%%------------------------------
%% create file with given mime and encoding
FileName4 = filename:join([binary_to_list(SpaceName), "mime_file.txt"]),
FileContent4 = <<"some content">>,
RequestHeaders4 = [?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER, user_1_token_header(Config)],
RawBody4 = json_utils:encode(#{<<"valuetransferencoding">> => <<"utf-8">>,
<<"mimetype">> => <<"text/plain">>,
<<"value">> => FileContent4}),
{ok, Code4, _Headers4, Response4} = do_request(Workers, FileName4, put, RequestHeaders4, RawBody4),
?assertEqual(?HTTP_201_CREATED, Code4),
CdmiResponse4 = (json_utils:decode(Response4)),
?assertMatch(#{<<"mimetype">> := <<"text/plain">>}, CdmiResponse4),
RequestHeaders5 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code5, _Headers5, Response5} = do_request(Workers, FileName4 ++ "?value;mimetype;valuetransferencoding", get, RequestHeaders5, []),
?assertEqual(?HTTP_200_OK, Code5),
CdmiResponse5 = (json_utils:decode(Response5)),
?assertMatch(#{<<"mimetype">> := <<"text/plain">>}, CdmiResponse5),
TODO VFS-7376 what do we return here if file contains valid utf-8 string and we read byte range ?
?assertMatch(#{<<"value">> := FileContent4}, CdmiResponse5),
%%------------------------------
%% create file with given mime and encoding using non-cdmi request
FileName6 = filename:join([binary_to_list(SpaceName), "mime_file_noncdmi.txt"]),
FileContent6 = <<"some content">>,
RequestHeaders6 = [{?HDR_CONTENT_TYPE, <<"text/plain; charset=utf-8">>}, user_1_token_header(Config)],
{ok, Code6, _Headers6, _Response6} = do_request(Workers, FileName6, put, RequestHeaders6, FileContent6),
?assertEqual(?HTTP_201_CREATED, Code6),
RequestHeaders7 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code7, _Headers7, Response7} = do_request(Workers, FileName6 ++ "?value;mimetype;valuetransferencoding", get, RequestHeaders7, []),
?assertEqual(?HTTP_200_OK, Code7),
CdmiResponse7 = (json_utils:decode(Response7)),
?assertMatch(#{<<"mimetype">> := <<"text/plain">>}, CdmiResponse7),
?assertMatch(#{<<"valuetransferencoding">> := <<"utf-8">>}, CdmiResponse7),
?assertMatch(#{<<"value">> := FileContent6}, CdmiResponse7).
%%------------------------------
% tests reading&writing file at random ranges
out_of_range(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    {_SpaceName, _ShortTestDirName, TestDirName, _TestFileName, _FullTestFileName, _TestFileContent} =
        create_test_dir_and_file(Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),

    FileName = filename:join([binary_to_list(SpaceName), "random_range_file.txt"]),
    {ok, _} = create_file(Config, FileName),

    %%---- reading out of range ---- (should return empty binary)
    ?assertEqual(<<>>, get_file_content(Config, FileName)),
    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    RequestBody1 = json_utils:encode(#{<<"value">> => <<"data">>}),

    {ok, Code1, _Headers1, Response1} = do_request(Workers, FileName ++ "?value:0-3", get, RequestHeaders1, RequestBody1),
    ?assertEqual(?HTTP_200_OK, Code1),
    CdmiResponse1 = (json_utils:decode(Response1)),
    ?assertMatch(#{<<"value">> := <<>>}, CdmiResponse1),
    %%------------------------------

    %%------ writing at end -------- (extend file)
    ?assertEqual(<<>>, get_file_content(Config, FileName)),

    RequestHeaders2 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody2 = json_utils:encode(#{<<"value">> => base64:encode(<<"data">>)}),
    {ok, Code2, _Headers2, _Response2} = do_request(Workers, FileName ++ "?value:0-3", put, RequestHeaders2, RequestBody2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),
    ?assertEqual(<<"data">>, get_file_content(Config, FileName)),
    %%------------------------------

    %%------ writing at random ----- (should return zero bytes in any gaps)
%%    RequestBody3 = json_utils:encode(#{<<"value">> => base64:encode(<<"data">>)}), todo fix VFS-1443 and uncomment
%%    {ok, Code3, _Headers3, _Response3} = do_request(Workers, FileName ++ "?value:10-13", put, RequestHeaders2, RequestBody3),
%%    ?assertEqual(?HTTP_204_NO_CONTENT, Code3),
%%
%%    ?assertEqual(<<100, 97, 116, 97, 0, 0, 0, 0, 0, 0, 100, 97, 116, 97>>, get_file_content(Config, FileName)), % "data(6x<0_byte>)data"
    %%------------------------------

    %%----- random childrange ------ (fail)
    {ok, Code4, _Headers4, Response4} = do_request(Workers, TestDirName ++ "/?children:100-132", get, RequestHeaders2, []),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_BAD_DATA(<<"childrenrange">>)),
    ?assertMatch(ExpRestError, {Code4, json_utils:decode(Response4)}).
%%------------------------------
% Verifies that a request carrying both "move" and "copy" fields is rejected
% as malformed and leaves the source file untouched.
move_copy_conflict(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileName = filename:join([binary_to_list(SpaceName), "move_test_file.txt"]),
    FileUri = list_to_binary(filename:join("/", FileName)),
    FileData = <<"data">>,
    create_file(Config, FileName),
    write_to_file(Config, FileName, FileData, 0),
    NewMoveFileName = "new_move_test_file",

    %%--- conflicting mv/cpy ------ (we cannot move and copy at the same time)
    ?assertEqual(FileData, get_file_content(Config, FileName)),

    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody1 = json_utils:encode(#{<<"move">> => FileUri,
        <<"copy">> => FileUri}),
    {ok, Code1, _Headers1, Response1} = do_request(Workers, NewMoveFileName, put, RequestHeaders1, RequestBody1),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_MALFORMED_DATA),
    ?assertMatch(ExpRestError, {Code1, json_utils:decode(Response1)}),
    % the source file must remain intact after the rejected request
    ?assertEqual(FileData, get_file_content(Config, FileName)).
%%------------------------------
% tests copy and move operations on dataobjects and containers
move(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpaceDir = binary_to_list(SpaceName),
    SrcFile = filename:join([SpaceDir, "move_test_file.txt"]),
    SrcDir = filename:join([SpaceDir, "move_test_dir"]) ++ "/",
    Content = <<"data">>,
    create_file(Config, SrcFile),
    mkdir(Config, SrcDir),
    write_to_file(Config, SrcFile, Content, 0),
    DstFile = filename:join([SpaceDir, "new_move_test_file"]),
    DstDir = filename:join([SpaceDir, "new_move_test_dir"]) ++ "/",

    %%----------- dir mv -----------
    % source dir exists, destination does not
    ?assert(object_exists(Config, SrcDir)),
    ?assert(not object_exists(Config, DstDir)),
    DirHeaders = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],
    DirBody = json_utils:encode(#{<<"move">> => list_to_binary(SrcDir)}),
    ?assertMatch(
        {ok, ?HTTP_201_CREATED, _, _},
        do_request(Workers, DstDir, put, DirHeaders, DirBody)
    ),
    % the directory has been moved, not copied
    ?assert(not object_exists(Config, SrcDir)),
    ?assert(object_exists(Config, DstDir)),
    %%------------------------------

    %%---------- file mv -----------
    % source file exists with its data, destination does not
    ?assert(object_exists(Config, SrcFile)),
    ?assert(not object_exists(Config, DstFile)),
    ?assertEqual(Content, get_file_content(Config, SrcFile)),
    FileHeaders = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    FileBody = json_utils:encode(#{<<"move">> => list_to_binary(SrcFile)}),
    ?assertMatch({ok, _, _, _}, do_request(Workers, DstFile, put, FileHeaders, FileBody)),
    % the content travels with the moved file
    ?assert(not object_exists(Config, SrcFile)),
    ?assert(object_exists(Config, DstFile)),
    ?assertEqual(Content, get_file_content(Config, DstFile)).
%%------------------------------
% tests the copy operation on dataobjects and containers
copy(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    %%---------- file cp ----------- (copy file, with xattrs and acl)
    % create file to copy
    FileName2 = filename:join([binary_to_list(SpaceName), "copy_test_file.txt"]),
    UserId1 = ?config({user_id, <<"user1">>}, Config),
    UserName1 = ?config({user_name, <<"user1">>}, Config),
    create_file(Config, FileName2),
    FileData2 = <<"data">>,
    FileAcl = [#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?all_object_perms_mask
    }],
    JsonMetadata = #{<<"a">> => <<"b">>, <<"c">> => 2, <<"d">> => []},
    Xattrs = [#xattr{name = <<"key1">>, value = <<"value1">>}, #xattr{name = <<"key2">>, value = <<"value2">>}],
    ok = set_acl(Config, FileName2, FileAcl),
    ok = set_json_metadata(Config, FileName2, JsonMetadata),
    ok = add_xattrs(Config, FileName2, Xattrs),
    {ok, _} = write_to_file(Config, FileName2, FileData2, 0),
    % assert source file is created and destination does not exist
    NewFileName2 = filename:join([binary_to_list(SpaceName), "copy_test_file2.txt"]),
    ?assert(object_exists(Config, FileName2)),
    ?assert(not object_exists(Config, NewFileName2)),
    ?assertEqual(FileData2, get_file_content(Config, FileName2)),
    ?assertEqual({ok, FileAcl}, get_acl(Config, FileName2)),
    % copy file using cdmi
    RequestHeaders4 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody4 = json_utils:encode(#{<<"copy">> => list_to_binary(FileName2)}),
    {ok, Code4, _Headers4, _Response4} = do_request(Workers, NewFileName2, put, RequestHeaders4, RequestBody4),
    ?assertEqual(?HTTP_201_CREATED, Code4),
    % assert new file is created (data, json metadata, xattrs and acl are all copied)
    ?assert(object_exists(Config, FileName2)),
    ?assert(object_exists(Config, NewFileName2)),
    ?assertEqual(FileData2, get_file_content(Config, NewFileName2)),
    ?assertEqual({ok, JsonMetadata}, get_json_metadata(Config, NewFileName2)),
    ?assertEqual(Xattrs ++ [#xattr{name = ?JSON_METADATA_KEY, value = JsonMetadata}],
        get_xattrs(Config, NewFileName2)),
    ?assertEqual({ok, FileAcl}, get_acl(Config, NewFileName2)),
    %%------------------------------

    %%---------- dir cp ------------
    % create dir to copy (with some subdirs and subfiles)
    DirName2 = filename:join([binary_to_list(SpaceName), "copy_dir"]) ++ "/",
    NewDirName2 = filename:join([binary_to_list(SpaceName), "new_copy_dir"]) ++ "/",
    DirAcl = [#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?all_container_perms_mask
    }],
    mkdir(Config, DirName2),
    ?assert(object_exists(Config, DirName2)),
    set_acl(Config, DirName2, DirAcl),
    add_xattrs(Config, DirName2, Xattrs),
    mkdir(Config, filename:join(DirName2, "dir1")),
    mkdir(Config, filename:join(DirName2, "dir2")),
    create_file(Config, filename:join([DirName2, "dir1", "1"])),
    create_file(Config, filename:join([DirName2, "dir1", "2"])),
    create_file(Config, filename:join(DirName2, "3")),
    % assert source files are successfully created, and destination file does not exist
    ?assert(object_exists(Config, DirName2)),
    ?assert(object_exists(Config, filename:join(DirName2, "dir1"))),
    ?assert(object_exists(Config, filename:join(DirName2, "dir2"))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "1"]))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "2"]))),
    ?assert(object_exists(Config, filename:join(DirName2, "3"))),
    ?assert(not object_exists(Config, NewDirName2)),
    % copy dir using cdmi
    RequestHeaders5 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],
    RequestBody5 = json_utils:encode(#{<<"copy">> => list_to_binary(DirName2)}),
    {ok, Code5, _Headers5, _Response5} = do_request(Workers, NewDirName2, put, RequestHeaders5, RequestBody5),
    ?assertEqual(?HTTP_201_CREATED, Code5),
    % assert source files still exist
    ?assert(object_exists(Config, DirName2)),
    ?assert(object_exists(Config, filename:join(DirName2, "dir1"))),
    ?assert(object_exists(Config, filename:join(DirName2, "dir2"))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "1"]))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "2"]))),
    ?assert(object_exists(Config, filename:join(DirName2, "3"))),
    % assert destination files have been created
    ?assert(object_exists(Config, NewDirName2)),
    ?assertEqual(Xattrs, get_xattrs(Config, NewDirName2)),
    ?assertEqual({ok, DirAcl}, get_acl(Config, NewDirName2)),
    ?assert(object_exists(Config, filename:join(NewDirName2, "dir1"))),
    ?assert(object_exists(Config, filename:join(NewDirName2, "dir2"))),
    ?assert(object_exists(Config, filename:join([NewDirName2, "dir1", "1"]))),
    ?assert(object_exists(Config, filename:join([NewDirName2, "dir1", "2"]))),
    ?assert(object_exists(Config, filename:join(NewDirName2, "3"))).
%%------------------------------
% tests cdmi and non-cdmi partial upload feature (requests with x-cdmi-partial flag set to true)
partial_upload(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),

    FileName = filename:join([binary_to_list(SpaceName), "partial.txt"]),
    FileName2 = filename:join([binary_to_list(SpaceName), "partial2.txt"]),
    Chunk1 = <<"some">>,
    Chunk2 = <<"_">>,
    Chunk3 = <<"value">>,

    %%------ cdmi request partial upload ------
    ?assert(not object_exists(Config, FileName)),

    % upload first chunk of file
    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER, {"X-CDMI-Partial", "true"}],
    RequestBody1 = json_utils:encode(#{<<"value">> => Chunk1}),
    {ok, Code1, _Headers1, Response1} = do_request(Workers, FileName, put, RequestHeaders1, RequestBody1),
    ?assertEqual(?HTTP_201_CREATED, Code1),
    CdmiResponse1 = (json_utils:decode(Response1)),
    ?assertMatch(#{<<"completionStatus">> := <<"Processing">>}, CdmiResponse1),

    % upload second chunk of file
    RequestBody2 = json_utils:encode(#{<<"value">> => base64:encode(Chunk2)}),
    {ok, Code2, _Headers2, _Response2} = do_request(Workers, FileName ++ "?value:4-4", put, RequestHeaders1, RequestBody2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),

    % upload third chunk of file (no partial flag - completes the upload)
    RequestHeaders3 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody3 = json_utils:encode(#{<<"value">> => base64:encode(Chunk3)}),
    {ok, Code3, _Headers3, _Response3} = do_request(Workers, FileName ++ "?value:5-9", put, RequestHeaders3, RequestBody3),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code3),

    % get created file and check its consistency
    RequestHeaders4 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    % TODO Verify once (without retries) after file size change events become synchronous
    CheckAllChunks = fun() ->
        {ok, Code4, _Headers4, Response4} = do_request(Workers, FileName, get, RequestHeaders4, []),
        ?assertEqual(?HTTP_200_OK, Code4),
        CdmiResponse4 = (json_utils:decode(Response4)),
        ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse4),
        ?assertMatch(#{<<"valuetransferencoding">> := <<"utf-8">>}, CdmiResponse4),
        maps:get(<<"value">>, CdmiResponse4)
    end,
    % File size event change is async
    Chunks123 = <<Chunk1/binary, Chunk2/binary, Chunk3/binary>>,
    ?assertMatch(Chunks123, CheckAllChunks(), 2),
    %%------------------------------

    %%----- non-cdmi request partial upload -------
    ?assert(not object_exists(Config, FileName2)),

    % upload first chunk of file
    RequestHeaders5 = [user_1_token_header(Config), {<<"X-CDMI-Partial">>, <<"true">>}],
    {ok, Code5, _Headers5, _Response5} = do_request(Workers, FileName2, put, RequestHeaders5, Chunk1),
    ?assertEqual(?HTTP_201_CREATED, Code5),

    % check "completionStatus", should be set to "Processing"
    {ok, Code5_1, _Headers5_1, Response5_1} = do_request(Workers, FileName2 ++ "?completionStatus", get, RequestHeaders4, Chunk1),
    CdmiResponse5_1 = (json_utils:decode(Response5_1)),
    ?assertEqual(?HTTP_200_OK, Code5_1),
    ?assertMatch(#{<<"completionStatus">> := <<"Processing">>}, CdmiResponse5_1),

    % upload second chunk of file
    RequestHeaders6 = [user_1_token_header(Config), {?HDR_CONTENT_RANGE, <<"bytes 4-4/10">>}, {<<"X-CDMI-Partial">>, <<"true">>}],
    {ok, Code6, _Headers6, _Response6} = do_request(Workers, FileName2, put, RequestHeaders6, Chunk2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code6),

    % upload third chunk of file (partial flag "false" completes the upload)
    RequestHeaders7 = [user_1_token_header(Config), {?HDR_CONTENT_RANGE, <<"bytes 5-9/10">>}, {<<"X-CDMI-Partial">>, <<"false">>}],
    {ok, Code7, _Headers7, _Response7} = do_request(Workers, FileName2, put, RequestHeaders7, Chunk3),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code7),

    % get created file and check its consistency
    RequestHeaders8 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    % TODO Verify once (without retries) after file size change events become synchronous
    CheckAllChunks2 = fun() ->
        {ok, Code8, _Headers8, Response8} = do_request(Workers, FileName2, get, RequestHeaders8, []),
        ?assertEqual(?HTTP_200_OK, Code8),
        CdmiResponse8 = (json_utils:decode(Response8)),
        ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse8),
        base64:decode(maps:get(<<"value">>, CdmiResponse8))
    end,
    % File size event change is async
    ?assertMatch(Chunks123, CheckAllChunks2(), 2).
%%------------------------------
% tests access control lists
acl(Config) ->
    [_WorkerP2, WorkerP1] = Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    Filename1 = filename:join([binary_to_list(SpaceName), "acl_test_file1"]),
    Dirname1 = filename:join([binary_to_list(SpaceName), "acl_test_dir1"]) ++ "/",
    UserId1 = ?config({user_id, <<"user1">>}, Config),
    UserName1 = ?config({user_name, <<"user1">>}, Config),
    Identifier1 = <<UserName1/binary, "#", UserId1/binary>>,

    % ACE granting all read permissions on an object
    Read = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?read_all_object_mask
    }, cdmi),
    ReadFull = #{
        <<"acetype">> => ?allow,
        <<"identifier">> => Identifier1,
        <<"aceflags">> => ?no_flags,
        <<"acemask">> => <<
            ?read_object/binary, ",",
            ?read_metadata/binary, ",",
            ?read_attributes/binary, ",",
            ?read_acl/binary
        >>
    },
    % ACE granting all write permissions on an object
    Write = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?write_all_object_mask
    }, cdmi),
    ReadWriteVerbose = #{
        <<"acetype">> => ?allow,
        <<"identifier">> => Identifier1,
        <<"aceflags">> => ?no_flags,
        <<"acemask">> => <<
            ?read_object/binary, ",",
            ?read_metadata/binary, ",",
            ?read_attributes/binary, ",",
            ?read_acl/binary, ",",
            ?write_object/binary, ",",
            ?write_metadata/binary, ",",
            ?write_attributes/binary, ",",
            ?delete/binary, ",",
            ?write_acl/binary
        >>
    },
    WriteAcl = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?write_acl_mask
    }, cdmi),
    Delete = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?delete_mask
    }, cdmi),

    MetadataAclReadFull = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [ReadFull, WriteAcl]}}),
    MetadataAclDelete = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [Delete]}}),
    MetadataAclWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [Write]}}),
    MetadataAclReadWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [Write, Read]}}),
    MetadataAclReadWriteFull = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [ReadWriteVerbose]}}),

    %%----- read file test ---------
    % create test file with dummy data
    ?assert(not object_exists(Config, Filename1)),
    create_file(Config, filename:join("/", Filename1)),
    write_to_file(Config, Filename1, <<"data">>, 0),
    EaccesError = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),

    % set acl to 'write' and test cdmi/non-cdmi get request (should return 403 forbidden)
    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclWrite),
    {ok, Code1, _, Response1} = do_request(Workers, Filename1, get, RequestHeaders1, []),
    ?assertMatch(EaccesError, {Code1, json_utils:decode(Response1)}),
    {ok, Code2, _, Response2} = do_request(Workers, Filename1, get, [user_1_token_header(Config)], []),
    ?assertMatch(EaccesError, {Code2, json_utils:decode(Response2)}),
    ?assertEqual({error, ?EACCES}, open_file(WorkerP1, Config, Filename1, read)),

    % set acl to 'read&write' and test cdmi/non-cdmi get request (should succeed)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclReadWriteFull),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Filename1, get, RequestHeaders1, []),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Filename1, get, [user_1_token_header(Config)], []),
    %%------------------------------

    %%------- write file test ------
    % set acl to 'read&write' and test cdmi/non-cdmi put request (should succeed)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclReadWrite),
    RequestBody4 = json_utils:encode(#{<<"value">> => <<"new_data">>}),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, RequestBody4),
    ?assertEqual(<<"new_data">>, get_file_content(Config, Filename1)),
    write_to_file(Config, Filename1, <<"1">>, 8),
    ?assertEqual(<<"new_data1">>, get_file_content(Config, Filename1)),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, [user_1_token_header(Config)], <<"new_data2">>),
    ?assertEqual(<<"new_data2">>, get_file_content(Config, Filename1)),

    % set acl to 'read' and test cdmi/non-cdmi put request (should return 403 forbidden)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclReadFull),
    RequestBody6 = json_utils:encode(#{<<"value">> => <<"new_data3">>}),
    {ok, Code3, _, Response3} = do_request(Workers, Filename1, put, RequestHeaders1, RequestBody6),
    ?assertMatch(EaccesError, {Code3, json_utils:decode(Response3)}),
    {ok, Code4, _, Response4} = do_request(Workers, Filename1, put, [user_1_token_header(Config)], <<"new_data4">>),
    ?assertMatch(EaccesError, {Code4, json_utils:decode(Response4)}),
    ?assertEqual(<<"new_data2">>, get_file_content(Config, Filename1)),
    ?assertEqual({error, ?EACCES}, open_file(WorkerP1, Config, Filename1, write)),
    ?assertEqual(<<"new_data2">>, get_file_content(Config, Filename1)),
    %%------------------------------

    %%------ delete file test ------
    % set acl to 'delete'
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclDelete),
    % delete file
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, delete, [user_1_token_header(Config)], []),
    ?assert(not object_exists(Config, Filename1)),
    %%------------------------------

    %%--- read write dir test ------
    ?assert(not object_exists(Config, Dirname1)),
    mkdir(Config, filename:join("/", Dirname1)),
    File1 = filename:join(Dirname1, "1"),
    File2 = filename:join(Dirname1, "2"),
    File3 = filename:join(Dirname1, "3"),
    File4 = filename:join(Dirname1, "4"),

    DirRead = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?read_all_container_mask bor ?traverse_container_mask
    }, cdmi),
    DirWrite = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?write_all_container_mask bor ?traverse_container_mask
    }, cdmi),
    DirMetadataAclReadWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [DirWrite, DirRead]}}),
    DirMetadataAclRead = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [DirRead, WriteAcl]}}),
    DirMetadataAclWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [DirWrite]}}),

    % set acl to 'read&write' and test cdmi get request (should succeed)
    RequestHeaders2 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Dirname1, put, RequestHeaders2, DirMetadataAclReadWrite),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Dirname1, get, RequestHeaders2, []),

    % create files in directory (should succeed)
    {ok, ?HTTP_201_CREATED, _, _} = do_request(Workers, File1, put, [user_1_token_header(Config)], []),
    ?assert(object_exists(Config, File1)),
    {ok, ?HTTP_201_CREATED, _, _} = do_request(Workers, File2, put, RequestHeaders1, <<"{\"value\":\"val\"}">>),
    ?assert(object_exists(Config, File2)),
    create_file(Config, File3),
    ?assert(object_exists(Config, File3)),

    % delete files (should succeed)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, File1, delete, [user_1_token_header(Config)], []),
    ?assert(not object_exists(Config, File1)),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, File2, delete, [user_1_token_header(Config)], []),
    ?assert(not object_exists(Config, File2)),

    % set acl to 'write' and test cdmi get request (should return 403 forbidden)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Dirname1, put, RequestHeaders2, DirMetadataAclWrite),
    {ok, Code5, _, Response5} = do_request(Workers, Dirname1, get, RequestHeaders2, []),
    ?assertMatch(EaccesError, {Code5, json_utils:decode(Response5)}),

    % set acl to 'read' and test cdmi put request (should return 403 forbidden)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Dirname1, put, RequestHeaders2, DirMetadataAclRead),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Dirname1, get, RequestHeaders2, []),
    {ok, Code6, _, Response6} = do_request(Workers, Dirname1, put, RequestHeaders2, json_utils:encode(#{<<"metadata">> => #{<<"my_meta">> => <<"value">>}})),
    ?assertMatch(EaccesError, {Code6, json_utils:decode(Response6)}),

    % create files (should return 403 forbidden)
    {ok, Code7, _, Response7} = do_request(Workers, File1, put, [user_1_token_header(Config)], []),
    ?assertMatch(EaccesError, {Code7, json_utils:decode(Response7)}),
    ?assert(not object_exists(Config, File1)),
    {ok, Code8, _, Response8} = do_request(Workers, File2, put, RequestHeaders1, <<"{\"value\":\"val\"}">>),
    ?assertMatch(EaccesError, {Code8, json_utils:decode(Response8)}),
    ?assert(not object_exists(Config, File2)),
    ?assertEqual({error, ?EACCES}, create_file(Config, File4)),
    ?assert(not object_exists(Config, File4)),

    % delete files (should return 403 forbidden)
    {ok, Code9, _, Response9} = do_request(Workers, File3, delete, [user_1_token_header(Config)], []),
    ?assertMatch(EaccesError, {Code9, json_utils:decode(Response9)}),
    ?assert(object_exists(Config, File3)).
%%------------------------------
% test error handling
errors(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    {SpaceName, _ShortTestDirName, TestDirName, _TestFileName, _FullTestFileName, _TestFileContent} =
        create_test_dir_and_file(Config),

    %%---- unauthorized access -----
    {ok, Code1, _Headers1, Response1} =
        do_request(Workers, TestDirName, get, [], []),
    ExpRestError1 = rest_test_utils:get_rest_error(?ERROR_UNAUTHORIZED),
    ?assertMatch(ExpRestError1, {Code1, json_utils:decode(Response1)}),
    %%------------------------------

    %%----- wrong create path ------
    % container content-type with a non-container (no trailing slash) path
    RequestHeaders2 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code2, _Headers2, Response2} =
        do_request(Workers, SpaceName ++ "/test_dir", put, RequestHeaders2, []),
    ExpRestError2 = rest_test_utils:get_rest_error(?ERROR_BAD_VALUE_IDENTIFIER(<<"path">>)),
    ?assertMatch(ExpRestError2, {Code2, json_utils:decode(Response2)}),
    %%------------------------------

    %%---- wrong create path 2 -----
    % object content-type with a container (trailing slash) path
    RequestHeaders3 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],
    {ok, Code3, _Headers3, Response3} =
        do_request(Workers, SpaceName ++ "/test_dir/", put, RequestHeaders3, []),
    ExpRestError3 = rest_test_utils:get_rest_error(?ERROR_BAD_VALUE_IDENTIFIER(<<"path">>)),
    ?assertMatch(ExpRestError3, {Code3, json_utils:decode(Response3)}),
    %%------------------------------

    %%-------- wrong base64 --------
    RequestHeaders4 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],
    RequestBody4 = json_utils:encode(#{<<"valuetransferencoding">> => <<"base64">>,
        <<"value">> => <<"#$%">>}),
    {ok, Code4, _Headers4, Response4} =
        do_request(Workers, SpaceName ++ "/some_file_b64", put, RequestHeaders4, RequestBody4),
    ExpRestError4 = rest_test_utils:get_rest_error(?ERROR_BAD_DATA(<<"base64">>)),
    ?assertMatch(ExpRestError4, {Code4, json_utils:decode(Response4)}),
    %%------------------------------

    %%-- reading non-existing file --
    RequestHeaders6 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],
    {ok, Code6, _Headers6, _Response6} =
        do_request(Workers, SpaceName ++ "/nonexistent_file", get, RequestHeaders6),
    ?assertEqual(Code6, ?HTTP_404_NOT_FOUND),
    %%------------------------------

    %%--- listing non-existing dir -----
    RequestHeaders7 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code7, _Headers7, _Response7} =
        do_request(Workers, SpaceName ++ "/nonexisting_dir/", get, RequestHeaders7),
    ?assertEqual(Code7, ?HTTP_404_NOT_FOUND),
    %%------------------------------

    %%--- open binary file without permission -----
    File8 = filename:join([SpaceName, "file8"]),
    FileContent8 = <<"File content...">>,
    create_file(Config, File8),
    ?assertEqual(object_exists(Config, File8), true),
    write_to_file(Config, File8, FileContent8, ?FILE_BEGINNING),
    ?assertEqual(get_file_content(Config, File8), FileContent8),
    RequestHeaders8 = [user_1_token_header(Config)],

    mock_opening_file_without_perms(Config),
    {ok, Code8, _Headers8, Response8} =
        do_request(Workers, File8, get, RequestHeaders8),
    unmock_opening_file_without_perms(Config),
    ExpRestError8 = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),
    ?assertMatch(ExpRestError8, {Code8, json_utils:decode(Response8)}),
    %%------------------------------

    %%--- open cdmi file without permission -----
    File9 = filename:join([SpaceName, "file9"]),
    FileContent9 = <<"File content...">>,
    create_file(Config, File9),
    ?assertEqual(object_exists(Config, File9), true),
    write_to_file(Config, File9, FileContent9, ?FILE_BEGINNING),
    ?assertEqual(get_file_content(Config, File9), FileContent9),
    RequestHeaders9 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],

    mock_opening_file_without_perms(Config),
    {ok, Code9, _Headers9, Response9} =
        do_request(Workers, File9, get, RequestHeaders9),
    unmock_opening_file_without_perms(Config),
    ExpRestError9 = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),
    ?assertMatch(ExpRestError9, {Code9, json_utils:decode(Response9)}).
%%------------------------------
% verifies that a wildcard Accept header is accepted for cdmi requests
accept_header(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    WildcardAccept = {?HDR_ACCEPT, <<"*/*">>},
    Headers = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, WildcardAccept],
    % when: listing the root path with Accept: */*
    {ok, Status, _RespHeaders, _RespBody} = do_request(Workers, [], get, Headers, []),
    % then: the request succeeds
    ?assertEqual(?HTTP_200_OK, Status).
% a raw (non-cdmi content-type) file PUT with the cdmi version header should succeed
create_raw_file_with_cdmi_version_header_should_succeed(Config) ->
    % given
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpacePath = binary_to_list(SpaceName),
    BaseHeaders = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    % when & then: upload with no content-type succeeds
    ?assertMatch(
        {ok, ?HTTP_201_CREATED, _, _},
        do_request(Workers, SpacePath ++ "/file1", put, BaseHeaders, <<"data">>)
    ),
    % when & then: upload with an explicit non-cdmi content-type succeeds as well
    ?assertMatch(
        {ok, ?HTTP_201_CREATED, _, _},
        do_request(Workers, SpacePath ++ "/file2", put,
            BaseHeaders ++ [{?HDR_CONTENT_TYPE, <<"text/plain">>}], <<"data2">>)
    ).
% a raw (non-cdmi content-type) dir PUT with the cdmi version header should succeed
create_raw_dir_with_cdmi_version_header_should_succeed(Config) ->
    % given
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpacePath = binary_to_list(SpaceName),
    BaseHeaders = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    % when & then: mkdir with no content-type succeeds
    ?assertMatch(
        {ok, ?HTTP_201_CREATED, _, _},
        do_request(Workers, SpacePath ++ "/dir1/", put, BaseHeaders, [])
    ),
    % when & then: mkdir with a non-cdmi content-type succeeds as well
    ?assertMatch(
        {ok, ?HTTP_201_CREATED, _, _},
        do_request(Workers, SpacePath ++ "/dir2/", put,
            BaseHeaders ++ [{?HDR_CONTENT_TYPE, <<"application/json">>}], <<"{}">>)
    ).
% a cdmi-object PUT without the cdmi version header should be rejected
create_cdmi_file_without_cdmi_version_header_should_fail(Config) ->
    % given
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    % when: cdmi object content-type is used but the version header is absent
    {ok, Status, _RespHeaders, RespBody} = do_request(
        Workers, binary_to_list(SpaceName) ++ "/file1", put,
        [user_1_token_header(Config), ?OBJECT_CONTENT_TYPE_HEADER], <<"{}">>
    ),
    % then: the request fails with a missing-required-value error
    DecodedBody = json_utils:decode(RespBody),
    ExpectedError = rest_test_utils:get_rest_error(?ERROR_MISSING_REQUIRED_VALUE(<<"version">>)),
    ?assertMatch(ExpectedError, {Status, DecodedBody}).
%% @doc Tests that creating a directory with the CDMI container
%% content-type but without the X-CDMI-Specification-Version header is
%% rejected with a "missing required value: version" error.
create_cdmi_dir_without_cdmi_version_header_should_fail(Config) ->
    % given
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    % when - CDMI container content-type present, version header absent
    {ok, Code, _ResponseHeaders, Response} = do_request(
        Workers, binary_to_list(SpaceName) ++ "/dir1/", put,
        [user_1_token_header(Config), ?CONTAINER_CONTENT_TYPE_HEADER]
    ),
    % then
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_MISSING_REQUIRED_VALUE(<<"version">>)),
    ?assertMatch(ExpRestError, {Code, json_utils:decode(Response)}).
%%%===================================================================
SetUp and TearDown functions
%%%===================================================================
%%%===================================================================
Internal functions
%%%===================================================================
%% @private
%% @doc Convenience wrapper for do_request/5 with an empty request body.
do_request(Node, RestSubpath, Method, Headers) ->
    do_request(Node, RestSubpath, Method, Headers, []).
%% @private
%% @doc Performs a CDMI request on the given node(s).
%% For a GET issued against a list of nodes the request is executed on
%% every node that supports the target space; all gathered responses are
%% asserted to be equivalent (modulo time metadata) and the first one is
%% returned. For other methods the nodes are shuffled and the first node
%% that supports the space handles the request.
do_request([_ | _] = Nodes, RestSubpath, get, Headers, Body) ->
    [FRes | _] = Responses = lists:filtermap(fun(Node) ->
        case make_request(Node, RestSubpath, get, Headers, Body) of
            space_not_supported -> false;
            Result -> {true, Result}
        end
    end, Nodes),
    case FRes of
        {error, _} ->
            ok;
        {ok, ?HTTP_206_PARTIAL_CONTENT, #{?HDR_CONTENT_TYPE := <<"multipart/byteranges", _/binary>>}, _} ->
            %% Multipart bodies contain a random boundary, so only the
            %% status codes can be compared across providers.
            lists:foreach(fun({ok, LCode, _, _}) ->
                ?assertMatch(?HTTP_206_PARTIAL_CONTENT, LCode)
            end, Responses);
        {ok, RCode, _, RResponse} ->
            RResponseJSON = try_to_decode(RResponse),
            lists:foreach(fun({ok, LCode, _, LResponse}) ->
                LResponseJSON = try_to_decode(LResponse),
                %% ct:print("~p ~p ~p ~p", [RCode, RResponseJSON, LCode, LResponseJSON]), %% Useful log for debugging
                ?assertMatch({RCode, RResponseJSON}, {LCode, LResponseJSON})
            end, Responses)
    end,
    FRes;
do_request([_ | _] = Nodes, RestSubpath, Method, Headers, Body) ->
    %% Try nodes in random order until one supports the target space.
    lists:foldl(fun
        (Node, space_not_supported) ->
            make_request(Node, RestSubpath, Method, Headers, Body);
        (_Node, Result) ->
            Result
    end, space_not_supported, lists_utils:shuffle(Nodes));
do_request(Node, RestSubpath, Method, Headers, Body) when is_atom(Node) ->
    make_request(Node, RestSubpath, Method, Headers, Body).
%% @private
%% @doc Executes a single CDMI request on one node and classifies the
%% outcome. When the node does not support the space targeted by the
%% path, a "space not supported" error response is collapsed to the atom
%% 'space_not_supported' so that callers can try another node; any other
%% (expected) error response is passed through unchanged.
make_request(Node, RestSubpath, Method, Headers, Body) ->
    case cdmi_test_utils:do_request(Node, RestSubpath, Method, Headers, Body) of
        {ok, RespCode, _RespHeaders, RespBody} = Result ->
            case is_space_supported(Node, RestSubpath) of
                true ->
                    Result;
                false ->
                    % Returned error may not be necessarily ?ERROR_SPACE_NOT_SUPPORTED(_, _)
                    % as some errors may be thrown even before file path resolution attempt
                    % (and such errors are explicitly checked by some tests),
                    % but it should never be any successful response
                    ?assert(RespCode >= 300),
                    case {RespCode, try_to_decode(RespBody)} of
                        {?HTTP_400_BAD_REQUEST, #{<<"error">> := #{
                            <<"id">> := <<"spaceNotSupportedBy">>
                        }}}->
                            space_not_supported;
                        _ ->
                            Result
                    end
            end;
        {error, _} = Error ->
            Error
    end.
%% @private
%% @doc Checks whether the space addressed by a CDMI path is supported by
%% the provider running on Node. The root path, capability paths and the
%% well-known capability object ids are always considered supported;
%% objectid paths are resolved to a space id, and plain paths are matched
%% by space name against the provider's supported spaces.
is_space_supported(_Node, "") ->
    true;
is_space_supported(_Node, "/") ->
    true;
is_space_supported(Node, CdmiPath) ->
    {ok, SuppSpaces} = rpc:call(Node, provider_logic, get_spaces, []),
    SpecialObjectIds = [?ROOT_CAPABILITY_ID, ?CONTAINER_CAPABILITY_ID, ?DATAOBJECT_CAPABILITY_ID],
    case binary:split(list_to_binary(CdmiPath), <<"/">>, [global, trim_all]) of
        [<<"cdmi_capabilities">> | _] ->
            true;
        [<<"cdmi_objectid">>, ObjectId | _] ->
            case lists:member(ObjectId, SpecialObjectIds) of
                true ->
                    true;
                false ->
                    {ok, FileGuid} = file_id:objectid_to_guid(ObjectId),
                    SpaceId = file_id:guid_to_space_id(FileGuid),
                    % the virtual root dir space is treated as supported everywhere
                    SpaceId == <<"rootDirVirtualSpaceId">> orelse lists:member(SpaceId, SuppSpaces)
            end;
        [SpaceName | _] ->
            lists:any(fun(SpaceId) -> get_space_name(Node, SpaceId) == SpaceName end, SuppSpaces)
    end.
%% @private
%% @doc Resolves a space id to its name via an RPC to space_logic on the
%% given node. NOTE(review): <<"0">> is presumably the root session id -
%% confirm against space_logic:get_name/2.
get_space_name(Node, SpaceId) ->
    {ok, SpaceName} = rpc:call(Node, space_logic, get_name, [<<"0">>, SpaceId]),
    SpaceName.
%% @private
%% @doc Attempts to decode a response body as JSON (stripping the
%% time-related CDMI metadata keys); on an invalid_json throw from
%% json_utils the raw body is returned unchanged.
try_to_decode(Body) ->
    try
        remove_times_metadata(json_utils:decode(Body))
    catch _:invalid_json ->
        Body
    end.
%% @private
%% @doc Strips the time-related CDMI metadata keys (ctime/atime/mtime)
%% from the <<"metadata">> section of a decoded JSON response so that
%% responses gathered from different providers can be compared.
%% Responses without a <<"metadata">> section are returned unchanged.
remove_times_metadata(ResponseJSON) ->
    TimeKeys = [<<"cdmi_ctime">>, <<"cdmi_atime">>, <<"cdmi_mtime">>],
    case maps:get(<<"metadata">>, ResponseJSON, undefined) of
        undefined ->
            ResponseJSON;
        Metadata ->
            maps:put(<<"metadata">>, maps:without(TimeKeys, Metadata), ResponseJSON)
    end.
%% @private
%% @doc Creates a randomly-named test directory containing a single test
%% file (with known content) in user1's first space, unless a directory
%% of that name already exists. Returns a tuple with the space name, the
%% short/full dir names, the short/full file names and the file content.
create_test_dir_and_file(Config) ->
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    TestDirName = get_random_string(),
    TestFileName = get_random_string(),
    FullTestDirName = filename:join([binary_to_list(SpaceName), TestDirName]),
    FullTestFileName = filename:join(["/", binary_to_list(SpaceName), TestDirName, TestFileName]),
    TestFileContent = <<"test_file_content">>,
    case object_exists(Config, TestDirName) of
        false ->
            {ok, _} = mkdir(Config, FullTestDirName),
            ?assert(object_exists(Config, FullTestDirName)),
            {ok, _} = create_file(Config, FullTestFileName),
            ?assert(object_exists(Config, FullTestFileName)),
            {ok, _} = write_to_file(Config, FullTestFileName, TestFileContent, 0),
            ?assertEqual(TestFileContent, get_file_content(Config, FullTestFileName));
        true -> ok
    end,
    {binary_to_list(SpaceName), TestDirName, FullTestDirName, TestFileName, FullTestFileName, TestFileContent}.
%% @private
%% @doc Checks (via lfm stat on the first worker) whether a file or
%% directory exists at the given path. Any stat error other than ENOENT
%% crashes with a case_clause, failing the calling test.
object_exists(Config, Path) ->
    [_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
    case lfm_proxy:stat(WorkerP1, SessionId,
        {path, absolute_binary_path(Path)}) of
        {ok, _} ->
            true;
        {error, ?ENOENT} ->
            false
    end.
%% @private
%% @doc Creates an empty file at the given path as user1 on the first
%% worker; returns the lfm_proxy:create result.
create_file(Config, Path) ->
    [_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
    lfm_proxy:create(WorkerP1, SessionId, absolute_binary_path(Path)).
%% @private
%% @doc Opens the file at the given path as user1 on the given worker
%% with the requested open mode; returns the lfm_proxy:open result.
open_file(Worker, Config, Path, OpenMode) ->
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(Worker)}}, Config),
    lfm_proxy:open(Worker, SessionId, {path, absolute_binary_path(Path)}, OpenMode).
%% @private
%% @doc Writes Data at Offset to the file at Path on the first worker,
%% closing the handle afterwards; returns the lfm_proxy:write result.
write_to_file(Config, Path, Data, Offset) ->
    [_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
    {ok, FileHandle} = open_file(WorkerP1, Config, Path, write),
    Result = lfm_proxy:write(WorkerP1, FileHandle, Offset, Data),
    lfm_proxy:close(WorkerP1, FileHandle),
    Result.
%% @private
%% @doc Reads the whole content of the file at Path on the second worker
%% (from offset 0 up to ?INFINITY bytes), closing the handle afterwards.
%% Returns the content binary, or {error, Reason} on a failed read.
get_file_content(Config, Path) ->
    [WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
    {ok, FileHandle} = open_file(WorkerP2, Config, Path, read),
    Result = case lfm_proxy:read(WorkerP2, FileHandle, ?FILE_BEGINNING, ?INFINITY) of
        {error, Error} -> {error, Error};
        {ok, Content} -> Content
    end,
    lfm_proxy:close(WorkerP2, FileHandle),
    Result.
%% @private
%% @doc Creates a directory at the given path as user1 on the first
%% worker; returns the lfm_proxy:mkdir result.
mkdir(Config, Path) ->
    [_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
    lfm_proxy:mkdir(WorkerP1, SessionId, absolute_binary_path(Path)).
%% @private
%% @doc Sets the given ACL on the file at Path as user1 on the first
%% worker; returns the lfm_proxy:set_acl result.
set_acl(Config, Path, Acl) ->
    [_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
    lfm_proxy:set_acl(WorkerP1, SessionId, {path, absolute_binary_path(Path)}, Acl).
%% @private
%% @doc Fetches the ACL of the file at Path as user1 on the second
%% worker; returns the lfm_proxy:get_acl result.
get_acl(Config, Path) ->
    [WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
    lfm_proxy:get_acl(WorkerP2, SessionId, {path, absolute_binary_path(Path)}).
%% @private
%% @doc Sets each of the given extended attributes on the file at Path
%% as user1 on the first worker; crashes if any set_xattr call fails.
add_xattrs(Config, Path, Xattrs) ->
    [_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
    lists:foreach(fun(Xattr) ->
        ok = lfm_proxy:set_xattr(WorkerP1, SessionId, {path, absolute_binary_path(Path)}, Xattr)
    end, Xattrs).
%% @private
%% @doc Fetches all user-defined extended attributes of the file at Path
%% on the second worker, skipping internal attributes whose names start
%% with the "cdmi_" prefix.
get_xattrs(Config, Path) ->
    [WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
    {ok, Xattrs} = lfm_proxy:list_xattr(WorkerP2, SessionId, {path, absolute_binary_path(Path)}, false, true),
    lists:filtermap(
        fun
            % internal CDMI attributes are not reported back to tests
            (<<"cdmi_", _/binary>>) ->
                false;
            (XattrName) ->
                {ok, Xattr} = lfm_proxy:get_xattr(WorkerP2, SessionId, {path, absolute_binary_path(Path)}, XattrName),
                {true, Xattr}
        end, Xattrs).
%% @private
%% @doc Sets JSON custom metadata on the file at Path (resolved to a
%% guid first) as user1 on the second worker; crashes on failure.
set_json_metadata(Config, Path, JsonTerm) ->
    [WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
    {ok, FileGuid} = lfm_proxy:resolve_guid(WorkerP2, SessionId, absolute_binary_path(Path)),
    ok = opt_file_metadata:set_custom_metadata(WorkerP2, SessionId, ?FILE_REF(FileGuid), json, JsonTerm, []).
%% @private
%% @doc Fetches the JSON custom metadata of the file at Path (resolved
%% to a guid first) as user1 on the second worker.
get_json_metadata(Config, Path) ->
    [WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
    SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
    {ok, FileGuid} = lfm_proxy:resolve_guid(WorkerP2, SessionId, absolute_binary_path(Path)),
    opt_file_metadata:get_custom_metadata(WorkerP2, SessionId, ?FILE_REF(FileGuid), json, [], false).
%% @private
%% @doc Converts a path given as a string into an absolute binary path
%% (one that is guaranteed to begin with a slash).
absolute_binary_path(Path) ->
    AbsolutePath = ensure_begins_with_slash(Path),
    list_to_binary(AbsolutePath).
%% @private
%% @doc Ensures that a path (given as a string) begins with a slash;
%% a path that already starts with "/" is returned unchanged.
%% Replaces the previous reverse/ensure-ends-with-slash/reverse
%% round-trip through binaries with a direct pattern match, which also
%% avoids the list_to_binary crash on codepoints above 255.
ensure_begins_with_slash([$/ | _] = Path) ->
    Path;
ensure_begins_with_slash(Path) ->
    [$/ | Path].
%% @private
%% @doc Mocks the lfm module on all workers so that every monitored_open
%% call fails with EACCES, simulating a file that cannot be opened due
%% to missing permissions. Must be undone with
%% unmock_opening_file_without_perms/1.
mock_opening_file_without_perms(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    test_node_starter:load_modules(Workers, [?MODULE]),
    test_utils:mock_new(Workers, lfm),
    test_utils:mock_expect(
        Workers, lfm, monitored_open, fun(_, _, _) -> {error, ?EACCES} end).
%% @private
%% @doc Removes the lfm mock installed by mock_opening_file_without_perms/1.
unmock_opening_file_without_perms(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    test_utils:mock_unload(Workers, lfm).
%% @private
%% @doc Returns a random 10-character alphanumeric string.
get_random_string() ->
    get_random_string(10, "abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ").
%% @private
%% @doc Builds a random string of Length characters, each drawn
%% uniformly (with replacement) from AllowedChars.
get_random_string(Length, AllowedChars) ->
    CharCount = length(AllowedChars),
    [lists:nth(rand:uniform(CharCount), AllowedChars) || _ <- lists:seq(1, Length)].
| null | https://raw.githubusercontent.com/onedata/op-worker/4b5d0bec945485800fb17a4b6674ce0dc9892edf/test_distributed/cdmi_test_base.erl | erlang | -------------------------------------------------------------------
@end
-------------------------------------------------------------------
@doc
@end
-------------------------------------------------------------------
===================================================================
Test functions
===================================================================
Tests cdmi container GET request (also refered as LIST)
------ list basic dir --------
------------------------------
------ list root space dir ---------
------------------------------
------------------------------
-- selective params list -----
------------------------------
---- childrenrange list ------
------------------------------
file conent is returned as response body), or with cdmi header (the response
contains json string of type: application/cdmi-object, and we can specify what
parameters we need by listing then as ';' separated list after '?' in URL )
-------- basic read ----------
------------------------------
-- selective params read -----
------------------------------
--- selective value read -----
------------------------------
------- noncdmi read --------
------------------------------
------------------------------
-------- read by id ----------
------------------------------
selective value single range read non-cdmi
------------------------------
selective value multi range read non-cdmi
------------------------------
------------------------------
read empty file non-cdmi without Range
------------------------------
Tests cdmi metadata read on object GET request.
-------- create file with user metadata --------
-- selective metadata read -----
-- selective metadata read with prefix -----
------ update user metadata of a file ----------
------ create directory with user metadata ----------
------ update user metadata of a directory ----------
------------------------------
------ write acl metadata ----------
------------------------------
-- create forbidden by acl ---
------------------------------
Tests cdmi object DELETE requests
----- basic delete -----------
------------------------------
----- delete group file ------
------------------------------
Tests cdmi container DELETE requests
----- basic delete -----------
------------------------------
------ recursive delete ------
------------------------------
------------------------------
json string), or without (when we treat request body as new file content)
-------- basic create --------
------------------------------
------ base64 create ---------
------------------------------
------- create empty ---------
------------------------------
------ create noncdmi --------
------------------------------
------------------------------
------------------------------
--- value replace, http ------
------------------------------
---- value update, http ------
------------------------------
------------------------------
---- value update, http error ------
------------------------------
given
when
then
given
when
then
with '/'
------ non-cdmi create -------
------------------------------
------ basic create ----------
------------------------------
---------- update ------------
------------------------------
----- missing parent ---------
------------------------------
tests access to file by objectid
-------- / objectid ----------
------------------------------
------ /dir objectid ---------
------------------------------
------------------------------
---- get / by objectid -------
------------------------------
------------------------------
------------------------------
---- unauthorized access to / by objectid -------
------------------------------
tests if capabilities of objects, containers, and whole storage system are set properly
--- system capabilities ------
------------------------------
-- container capabilities ----
------------------------------
------------------------------
--------- dir test -----------
------------------------------
--------- dir test with QS-----------
------------------------------
--------- file test ----------
------------------------------
tests req format checking
-- obj missing content-type --
------------------------------
------------------------------
and should be changeble
------------------------------
-- update mime and encoding --
------------------------------
create file with given mime and encoding
------------------------------
create file with given mime and encoding using non-cdmi request
------------------------------
tests reading&writing file at random ranges
---- reading out of range ---- (shuld return empty binary)
------------------------------
------------------------------
RequestBody3 = json_utils:encode(#{<<"value">> => base64:encode(<<"data">>)}), todo fix -1443 and uncomment
" data(6x<0_byte>)data "
------------------------------
------------------------------
------------------------------
tests copy and move operations on dataobjects and containers
----------- dir mv -----------
------------------------------
---------- file mv -----------
------------------------------
create file to copy
assert source file is created and destination does not exist
copy file using cdmi
assert new file is created
------------------------------
---------- dir cp ------------
create dir to copy (with some subdirs and subfiles)
assert source files are successfully created, and destination file does not exist
assert source files still exists
assert destination files have been created
------------------------------
tests cdmi and non-cdmi partial upload feature (requests with x-cdmi-partial flag set to true)
upload first chunk of file
get created file and check its consistency
File size event change is async
------------------------------
----- non-cdmi request partial upload -------
upload first chunk of file
check "completionStatus", should be set to "Processing"
get created file and check its consistency
File size event change is async
------------------------------
tests access control lists
----- read file test ---------
create test file with dummy data
set acl to 'read&write' and test cdmi/non-cdmi get request (should succeed)
------------------------------
------- write file test ------
set acl to 'read&write' and test cdmi/non-cdmi put request (should succeed)
------------------------------
------ delete file test ------
set acl to 'delete'
delete file
------------------------------
--- read write dir test ------
set acl to 'read&write' and test cdmi get request (should succeed)
create files in directory (should succeed)
delete files (should succeed)
------------------------------
test error handling
---- unauthorized access -----
------------------------------
----- wrong create path ------
------------------------------
------------------------------
-------- wrong base64 --------
------------------------------
-- reading non-existing file --
------------------------------
--- listing non-existing dir -----
------------------------------
--- open binary file without permission -----
------------------------------
--- open cdmi file without permission -----
------------------------------
when
then
given
when
given
when
given
when
given
when
===================================================================
===================================================================
===================================================================
===================================================================
% Useful log for debugging
Returned error may not be necessarily ?ERROR_SPACE_NOT_SUPPORTED(_, _)
as some errors may be thrown even before file path resolution attempt
(and such errors are explicitly checked by some tests),
but it should never be any successful response | @author
( C ) 2015 ACK CYFRONET AGH
This software is released under the MIT license
cited in ' LICENSE.txt ' .
CDMI tests
-module(cdmi_test_base).
-author("Tomasz Lichon").
-include("global_definitions.hrl").
-include("http/cdmi.hrl").
-include("http/rest.hrl").
-include("modules/fslogic/acl.hrl").
-include("modules/fslogic/fslogic_common.hrl").
-include("modules/fslogic/file_attr.hrl").
-include("modules/fslogic/metadata.hrl").
-include("modules/logical_file_manager/lfm.hrl").
-include("proto/common/credentials.hrl").
-include_lib("ctool/include/errors.hrl").
-include_lib("ctool/include/logging.hrl").
-include_lib("ctool/include/http/headers.hrl").
-include_lib("ctool/include/test/assertions.hrl").
-include_lib("ctool/include/test/performance.hrl").
-include_lib("ctool/include/test/test_utils.hrl").
-export([
list_dir/1,
get_file/1,
metadata/1,
delete_file/1,
delete_dir/1,
create_file/1,
update_file/1,
create_dir/1,
capabilities/1,
use_supported_cdmi_version/1,
use_unsupported_cdmi_version/1,
moved_permanently/1,
objectid/1,
request_format_check/1,
mimetype_and_encoding/1,
out_of_range/1,
partial_upload/1,
acl/1,
errors/1,
accept_header/1,
move_copy_conflict/1,
move/1,
copy/1,
create_raw_file_with_cdmi_version_header_should_succeed/1,
create_raw_dir_with_cdmi_version_header_should_succeed/1,
create_cdmi_file_without_cdmi_version_header_should_fail/1,
create_cdmi_dir_without_cdmi_version_header_should_fail/1
]).
-define(TIMEOUT, timer:seconds(5)).
user_1_token_header(Config) ->
rest_test_utils:user_token_header(?config({access_token, <<"user1">>}, Config)).
-define(CDMI_VERSION_HEADER, {<<"X-CDMI-Specification-Version">>, <<"1.1.1">>}).
-define(CONTAINER_CONTENT_TYPE_HEADER, {?HDR_CONTENT_TYPE, <<"application/cdmi-container">>}).
-define(OBJECT_CONTENT_TYPE_HEADER, {?HDR_CONTENT_TYPE, <<"application/cdmi-object">>}).
-define(FILE_BEGINNING, 0).
-define(INFINITY, 9999).
list_dir(Config) ->
Workers = ?config(op_worker_nodes, Config),
{SpaceName, ShortTestDirName, TestDirName, TestFileName, _FullTestFileName, _TestFileContent} =
create_test_dir_and_file(Config),
TestDirNameCheck = list_to_binary(ShortTestDirName ++ "/"),
TestFileNameBin = list_to_binary(TestFileName),
{ok, Code1, Headers1, Response1} =
do_request(Workers, TestDirName ++ "/", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code1),
?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/cdmi-container">>}, Headers1),
CdmiResponse1 = json_utils:decode(Response1),
?assertMatch(#{<<"objectType">> := <<"application/cdmi-container">>},
CdmiResponse1),
?assertMatch(#{<<"objectName">> := TestDirNameCheck}, CdmiResponse1),
?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse1),
?assertMatch(#{<<"children">> := [TestFileNameBin]}, CdmiResponse1),
?assert(maps:get(<<"metadata">>, CdmiResponse1) =/= <<>>),
{ok, Code2, _Headers2, Response2} =
do_request(Workers, SpaceName ++ "/", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code2),
CdmiResponse2 = json_utils:decode(Response2),
SpaceDirName = list_to_binary(SpaceName ++ "/"),
?assertMatch(#{<<"objectName">> := SpaceDirName}, CdmiResponse2),
?assertMatch(#{<<"children">> := [TestDirNameCheck]}, CdmiResponse2),
--- list -----
{ok, Code3, _Headers3, _Response3} =
do_request(Workers, "nonexisting_dir/",
get, [user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_404_NOT_FOUND, Code3),
{ok, Code4, _Headers4, Response4} =
do_request(Workers, TestDirName ++ "/?children;objectName",
get, [user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code4),
CdmiResponse4 = json_utils:decode(Response4),
?assertMatch(#{<<"objectName">> := TestDirNameCheck}, CdmiResponse4),
?assertMatch(#{<<"children">> := [TestFileNameBin]}, CdmiResponse4),
?assertEqual(2, maps:size(CdmiResponse4)),
ChildrangeDir = SpaceName ++ "/childrange/",
mkdir(Config, ChildrangeDir),
Childs = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11",
"12", "13", "14"],
ChildsBinaries = lists:map(fun(X) -> list_to_binary(X) end, Childs),
lists:map(fun(FileName) ->
create_file(Config, filename:join(ChildrangeDir, FileName))
end, Childs),
{ok, Code5, _Headers5, Response5} =
do_request(Workers, ChildrangeDir ++ "?children;childrenrange",
get, [user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code5),
CdmiResponse5 = (json_utils:decode(Response5)),
ChildrenResponse1 = maps:get(<<"children">>, CdmiResponse5),
?assert(is_list(ChildrenResponse1)),
lists:foreach(fun(Name) ->
?assert(lists:member(Name, ChildrenResponse1))
end, ChildsBinaries),
?assertMatch(#{<<"childrenrange">> := <<"0-14">>}, CdmiResponse5),
{ok, Code6, _, Response6} =
do_request(Workers, ChildrangeDir ++ "?children:2-13;childrenrange", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
{ok, Code7, _, Response7} =
do_request(Workers, ChildrangeDir ++ "?children:0-1;childrenrange", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
{ok, Code8, _, Response8} =
do_request(Workers, ChildrangeDir ++ "?children:14-14;childrenrange", get,
[user_1_token_header(Config), ?CDMI_VERSION_HEADER], []),
?assertEqual(?HTTP_200_OK, Code6),
?assertEqual(?HTTP_200_OK, Code7),
?assertEqual(?HTTP_200_OK, Code8),
CdmiResponse6 = json_utils:decode(Response6),
CdmiResponse7 = json_utils:decode(Response7),
CdmiResponse8 = json_utils:decode(Response8),
ChildrenResponse6 = maps:get(<<"children">>, CdmiResponse6),
ChildrenResponse7 = maps:get(<<"children">>, CdmiResponse7),
ChildrenResponse8 = maps:get(<<"children">>, CdmiResponse8),
?assert(is_list(ChildrenResponse6)),
?assert(is_list(ChildrenResponse7)),
?assert(is_list(ChildrenResponse8)),
?assertEqual(12, length(ChildrenResponse6)),
?assertEqual(2, length(ChildrenResponse7)),
?assertEqual(1, length(ChildrenResponse8)),
?assertMatch(#{<<"childrenrange">> := <<"2-13">>}, CdmiResponse6),
?assertMatch(#{<<"childrenrange">> := <<"0-1">>}, CdmiResponse7),
?assertMatch(#{<<"childrenrange">> := <<"14-14">>}, CdmiResponse8),
lists:foreach(
fun(Name) ->
?assert(lists:member(Name,
ChildrenResponse6 ++ ChildrenResponse7 ++ ChildrenResponse8))
end, ChildsBinaries).
Tests cdmi object GET request . Request can be done without header ( in that case
get_file(Config) ->
[{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
EmptyFileName = filename:join([binary_to_list(SpaceName), "empty.txt"]),
FilledFileName = filename:join([binary_to_list(SpaceName), "toRead.txt"]),
FileContent = <<"Some content...">>,
Workers = ?config(op_worker_nodes, Config),
{ok, _} = create_file(Config, EmptyFileName),
{ok, _} = create_file(Config, FilledFileName),
?assert(object_exists(Config, FilledFileName)),
{ok, _} = write_to_file(Config, FilledFileName, FileContent, ?FILE_BEGINNING),
?assertEqual(FileContent, get_file_content(Config, FilledFileName)),
RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code1, _Headers1, Response1} = do_request(Workers, FilledFileName, get, RequestHeaders1, []),
?assertEqual(?HTTP_200_OK, Code1),
CdmiResponse1 = json_utils:decode(Response1),
FileContent1 = base64:encode(FileContent),
SpaceName1 = <<"/", SpaceName/binary, "/">>,
?assertMatch(#{<<"objectType">> := <<"application/cdmi-object">>}, CdmiResponse1),
?assertMatch(#{<<"objectName">> := <<"toRead.txt">>}, CdmiResponse1),
?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse1),
?assertMatch(#{<<"valuetransferencoding">> := <<"base64">>}, CdmiResponse1),
?assertMatch(#{<<"mimetype">> := <<"application/octet-stream">>}, CdmiResponse1),
?assertMatch(#{<<"parentURI">> := SpaceName1}, CdmiResponse1),
?assertMatch(#{<<"value">> := FileContent1}, CdmiResponse1),
?assertMatch(#{<<"valuerange">> := <<"0-14">>}, CdmiResponse1),
?assert(maps:get(<<"metadata">>, CdmiResponse1) =/= <<>>),
RequestHeaders2 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code2, _Headers2, Response2} = do_request(Workers, FilledFileName ++ "?parentURI;completionStatus", get, RequestHeaders2, []),
?assertEqual(?HTTP_200_OK, Code2),
CdmiResponse2 = json_utils:decode(Response2),
?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse2),
?assertMatch(#{<<"parentURI">> := SpaceName1}, CdmiResponse2),
?assertEqual(2, maps:size(CdmiResponse2)),
RequestHeaders3 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code3, _Headers3, Response3} = do_request(Workers, FilledFileName ++ "?value:1-3;valuerange", get, RequestHeaders3, []),
?assertEqual(?HTTP_200_OK, Code3),
CdmiResponse3 = json_utils:decode(Response3),
?assertMatch(#{<<"valuerange">> := <<"1-3">>}, CdmiResponse3),
1 - 3 from FileContent = < < " Some content ... " > >
{ok, Code4, Headers4, Response4} =
do_request(Workers, FilledFileName, get, [user_1_token_header(Config)]),
?assertEqual(?HTTP_200_OK, Code4),
?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/octet-stream">>}, Headers4),
?assertEqual(FileContent, Response4),
------- read --------
RequestHeaders5 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code5, _Headers5, Response5} = do_request(Workers, FilledFileName ++ "?objectID", get, RequestHeaders5, []),
?assertEqual(?HTTP_200_OK, Code5),
CdmiResponse5 = (json_utils:decode(Response5)),
ObjectID = maps:get(<<"objectID">>, CdmiResponse5),
?assert(is_binary(ObjectID)),
RequestHeaders6 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code6, _Headers6, Response6} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(ObjectID), get, RequestHeaders6, []),
?assertEqual(?HTTP_200_OK, Code6),
CdmiResponse6 = (json_utils:decode(Response6)),
?assertEqual(FileContent, base64:decode(maps:get(<<"value">>, CdmiResponse6))),
?assertMatch(
{ok, ?HTTP_206_PARTIAL_CONTENT, #{?HDR_CONTENT_RANGE := <<"bytes 5-8/15">>}, <<"cont">>},
do_request(Workers, FilledFileName, get, [
{?HDR_RANGE, <<"bytes=5-8">>}, user_1_token_header(Config)
])
),
{ok, _, #{
?HDR_CONTENT_TYPE := <<"multipart/byteranges; boundary=", Boundary/binary>>
}, Response8} = ?assertMatch(
{ok, ?HTTP_206_PARTIAL_CONTENT, #{?HDR_CONTENT_TYPE := <<"multipart/byteranges", _/binary>>}, _},
do_request(Workers, FilledFileName, get, [
{?HDR_RANGE, <<"bytes=1-3,5-5,-3">>}, user_1_token_header(Config)
])
),
ExpResponse8 = <<
"--", Boundary/binary,
"\r\ncontent-type: application/octet-stream\r\ncontent-range: bytes 1-3/15",
"\r\n\r\nome",
"--", Boundary/binary,
"\r\ncontent-type: application/octet-stream\r\ncontent-range: bytes 5-5/15",
"\r\n\r\nc",
"--", Boundary/binary,
"\r\ncontent-type: application/octet-stream\r\ncontent-range: bytes 12-14/15",
"\r\n\r\n...\r\n",
"--", Boundary/binary, "--"
>>,
?assertEqual(ExpResponse8, Response8),
read file non - cdmi with invalid Range should fail
lists:foreach(fun(InvalidRange) ->
?assertMatch(
{ok, ?HTTP_416_RANGE_NOT_SATISFIABLE, #{?HDR_CONTENT_RANGE := <<"bytes */15">>}, <<>>},
do_request(Workers, FilledFileName, get, [
{?HDR_RANGE, InvalidRange}, user_1_token_header(Config)
])
)
end, [
<<"unicorns">>,
<<"bytes:5-10">>,
<<"bytes=5=10">>,
<<"bytes=-15-10">>,
<<"bytes=100-150">>,
<<"bytes=10-5">>,
<<"bytes=-5-">>,
<<"bytes=10--5">>,
<<"bytes=10-15-">>
]),
?assertMatch(
{ok, ?HTTP_200_OK, _, <<>>},
do_request(Workers, EmptyFileName, get, [user_1_token_header(Config)])
),
read empty file non - cdmi with Range should return 416
?assertMatch(
{ok, ?HTTP_416_RANGE_NOT_SATISFIABLE, #{?HDR_CONTENT_RANGE := <<"bytes */0">>}, <<>>},
do_request(Workers, EmptyFileName, get, [
{?HDR_RANGE, <<"bytes=10-15">>}, user_1_token_header(Config)
])
).
metadata(Config) ->
Workers = ?config(op_worker_nodes, Config),
[{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
UserId1 = ?config({user_id, <<"user1">>}, Config),
FileName = filename:join([binary_to_list(SpaceName), "metadataTest.txt"]),
FileContent = <<"Some content...">>,
DirName = filename:join([binary_to_list(SpaceName), "metadataTestDir"]) ++ "/",
?assert(not object_exists(Config, FileName)),
RequestHeaders1 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
RequestBody1 = #{
<<"value">> => FileContent,
<<"valuetransferencoding">> => <<"utf-8">>,
<<"mimetype">> => <<"text/plain">>,
<<"metadata">> => #{<<"my_metadata">> => <<"my_value">>,
<<"cdmi_not_allowed">> => <<"my_value">>}},
RawRequestBody1 = json_utils:encode(RequestBody1),
Before = time:seconds_to_datetime(global_clock:timestamp_seconds()),
{ok, Code1, _Headers1, Response1} = do_request(Workers, FileName, put, RequestHeaders1, RawRequestBody1),
After = time:seconds_to_datetime(global_clock:timestamp_seconds()),
?assertEqual(?HTTP_201_CREATED, Code1),
CdmiResponse1 = (json_utils:decode(Response1)),
Metadata = maps:get(<<"metadata">>, CdmiResponse1),
Metadata1 = Metadata,
?assertMatch(#{<<"cdmi_size">> := <<"15">>}, Metadata1),
CTime1 = time:iso8601_to_datetime(maps:get(<<"cdmi_ctime">>, Metadata1)),
ATime1 = time:iso8601_to_datetime(maps:get(<<"cdmi_atime">>, Metadata1)),
MTime1 = time:iso8601_to_datetime(maps:get(<<"cdmi_mtime">>, Metadata1)),
?assert(Before =< ATime1),
?assert(Before =< MTime1),
?assert(Before =< CTime1),
?assert(ATime1 =< After),
?assert(MTime1 =< After),
?assert(CTime1 =< After),
?assertMatch(UserId1, maps:get(<<"cdmi_owner">>, Metadata1)),
?assertMatch(#{<<"my_metadata">> := <<"my_value">>}, Metadata1),
?assertEqual(6, maps:size(Metadata1)),
{ok, ?HTTP_200_OK, _Headers2, Response2} = do_request(Workers, FileName ++ "?metadata", get, RequestHeaders1, []),
CdmiResponse2 = (json_utils:decode(Response2)),
?assertEqual(1, maps:size(CdmiResponse2)),
Metadata2 = maps:get(<<"metadata">>, CdmiResponse2),
?assertEqual(6, maps:size(Metadata2)),
{ok, ?HTTP_200_OK, _Headers3, Response3} = do_request(Workers, FileName ++ "?metadata:cdmi_", get, RequestHeaders1, []),
CdmiResponse3 = (json_utils:decode(Response3)),
?assertEqual(1, maps:size(CdmiResponse3)),
Metadata3 = maps:get(<<"metadata">>, CdmiResponse3),
?assertEqual(5, maps:size(Metadata3)),
{ok, ?HTTP_200_OK, _Headers4, Response4} = do_request(Workers, FileName ++ "?metadata:cdmi_o", get, RequestHeaders1, []),
CdmiResponse4 = json_utils:decode(Response4),
?assertEqual(1, maps:size(CdmiResponse4)),
Metadata4 = maps:get(<<"metadata">>, CdmiResponse4),
?assertMatch(UserId1, maps:get(<<"cdmi_owner">>, Metadata4)),
?assertEqual(1, maps:size(Metadata4)),
{ok, ?HTTP_200_OK, _Headers5, Response5} = do_request(Workers, FileName ++ "?metadata:cdmi_size", get, RequestHeaders1, []),
CdmiResponse5 = json_utils:decode(Response5),
?assertEqual(1, maps:size(CdmiResponse5)),
Metadata5 = maps:get(<<"metadata">>, CdmiResponse5),
?assertMatch(#{<<"cdmi_size">> := <<"15">>}, Metadata5),
?assertEqual(1, maps:size(Metadata5)),
{ok, ?HTTP_200_OK, _Headers6, Response6} = do_request(Workers, FileName ++ "?metadata:cdmi_no_such_metadata", get, RequestHeaders1, []),
CdmiResponse6 = json_utils:decode(Response6),
?assertEqual(1, maps:size(CdmiResponse6)),
?assertMatch(#{<<"metadata">> := #{}}, CdmiResponse6),
RequestBody7 = #{<<"metadata">> => #{<<"my_new_metadata">> => <<"my_new_value">>}},
RawRequestBody7 = json_utils:encode(RequestBody7),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, FileName, put, RequestHeaders1, RawRequestBody7),
{ok, ?HTTP_200_OK, _Headers7, Response7} = do_request(Workers, FileName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse7 = (json_utils:decode(Response7)),
?assertEqual(1, maps:size(CdmiResponse7)),
Metadata7 = maps:get(<<"metadata">>, CdmiResponse7),
?assertMatch(#{<<"my_new_metadata">> := <<"my_new_value">>}, Metadata7),
?assertEqual(1, maps:size(Metadata7)),
RequestBody8 = #{<<"metadata">> =>
#{<<"my_new_metadata_add">> => <<"my_new_value_add">>,
<<"my_new_metadata">> => <<"my_new_value_update">>,
<<"cdmi_not_allowed">> => <<"my_value">>}},
RawRequestBody8 = json_utils:encode(RequestBody8),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, FileName ++ "?metadata:my_new_metadata_add;metadata:my_new_metadata;metadata:cdmi_not_allowed",
put, RequestHeaders1, RawRequestBody8),
{ok, ?HTTP_200_OK, _Headers8, Response8} = do_request(Workers, FileName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse8 = (json_utils:decode(Response8)),
?assertEqual(1, maps:size(CdmiResponse8)),
Metadata8 = maps:get(<<"metadata">>, CdmiResponse8),
?assertMatch(#{<<"my_new_metadata_add">> := <<"my_new_value_add">>}, Metadata8),
?assertMatch(#{<<"my_new_metadata">> := <<"my_new_value_update">>}, Metadata8),
?assertEqual(2, maps:size(Metadata8)),
{ok, ?HTTP_200_OK, _Headers9, Response9} = do_request(Workers, FileName ++ "?metadata:cdmi_", get, RequestHeaders1, []),
CdmiResponse9 = (json_utils:decode(Response9)),
?assertEqual(1, maps:size(CdmiResponse9)),
Metadata9 = maps:get(<<"metadata">>, CdmiResponse9),
?assertEqual(5, maps:size(Metadata9)),
RequestBody10 = #{<<"metadata">> => #{<<"my_new_metadata">> => <<"my_new_value_ignore">>}},
RawRequestBody10 = json_utils:encode(RequestBody10),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, FileName ++ "?metadata:my_new_metadata_add", put, RequestHeaders1,
RawRequestBody10),
{ok, ?HTTP_200_OK, _Headers10, Response10} = do_request(Workers, FileName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse10 = (json_utils:decode(Response10)),
?assertEqual(1, maps:size(CdmiResponse10)),
Metadata10 = maps:get(<<"metadata">>, CdmiResponse10),
?assertMatch(#{<<"my_new_metadata">> := <<"my_new_value_update">>}, Metadata10),
?assertEqual(1, maps:size(Metadata10)),
RequestHeaders2 = [?CONTAINER_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
RequestBody11 = #{<<"metadata">> => #{<<"my_metadata">> => <<"my_dir_value">>}},
RawRequestBody11 = json_utils:encode(RequestBody11),
{ok, ?HTTP_201_CREATED, _Headers11, Response11} = do_request(Workers, DirName, put, RequestHeaders2, RawRequestBody11),
CdmiResponse11 = (json_utils:decode(Response11)),
Metadata11 = maps:get(<<"metadata">>, CdmiResponse11),
?assertMatch(#{<<"my_metadata">> := <<"my_dir_value">>}, Metadata11),
RequestBody12 = #{<<"metadata">> => #{<<"my_metadata">> => <<"my_dir_value_update">>}},
RawRequestBody12 = json_utils:encode(RequestBody12),
{ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, DirName, put, RequestHeaders2, RawRequestBody12),
{ok, ?HTTP_200_OK, _Headers13, Response13} = do_request(Workers, DirName ++ "?metadata:my", get, RequestHeaders1, []),
CdmiResponse13 = (json_utils:decode(Response13)),
?assertEqual(1, maps:size(CdmiResponse13)),
Metadata13 = maps:get(<<"metadata">>, CdmiResponse13),
?assertMatch(#{<<"my_metadata">> := <<"my_dir_value_update">>}, Metadata13),
?assertEqual(1, maps:size(Metadata13)),
UserId1 = ?config({user_id, <<"user1">>}, Config),
UserName1 = ?config({user_name, <<"user1">>}, Config),
FileName2 = filename:join([binary_to_list(SpaceName), "acl_test_file.txt"]),
Ace1 = ace:to_json(#access_control_entity{
acetype = ?allow_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?read_all_object_mask
}, cdmi),
Ace2 = ace:to_json(#access_control_entity{
acetype = ?allow_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?write_all_object_mask
}, cdmi),
Ace2Full = #{
<<"acetype">> => ?allow,
<<"identifier">> => <<UserName1/binary, "#", UserId1/binary>>,
<<"aceflags">> => ?no_flags,
<<"acemask">> => <<
?write_object/binary, ",",
?write_metadata/binary, ",",
?write_attributes/binary, ",",
?delete/binary, ",",
?write_acl/binary
>>
},
create_file(Config, FileName2),
write_to_file(Config, FileName2, <<"data">>, 0),
RequestBody15 = #{<<"metadata">> => #{<<"cdmi_acl">> => [Ace1, Ace2Full]}},
RawRequestBody15 = json_utils:encode(RequestBody15),
RequestHeaders15 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code15, _Headers15, Response15} = do_request(Workers, FileName2 ++ "?metadata:cdmi_acl", put, RequestHeaders15, RawRequestBody15),
?assertMatch({?HTTP_204_NO_CONTENT, _}, {Code15, Response15}),
{ok, Code16, _Headers16, Response16} = do_request(Workers, FileName2 ++ "?metadata", get, RequestHeaders1, []),
?assertEqual(?HTTP_200_OK, Code16),
CdmiResponse16 = (json_utils:decode(Response16)),
?assertEqual(1, maps:size(CdmiResponse16)),
Metadata16 = maps:get(<<"metadata">>, CdmiResponse16),
?assertEqual(6, maps:size(Metadata16)),
?assertMatch(#{<<"cdmi_acl">> := [Ace1, Ace2]}, Metadata16),
{ok, Code17, _Headers17, Response17} = do_request(Workers, FileName2, get, [user_1_token_header(Config)], []),
?assertEqual(?HTTP_200_OK, Code17),
?assertEqual(<<"data">>, Response17),
Ace3 = ace:to_json(#access_control_entity{
acetype = ?allow_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?write_metadata_mask
}, cdmi),
Ace4 = ace:to_json(#access_control_entity{
acetype = ?deny_mask,
identifier = UserId1,
name = UserName1,
aceflags = ?no_flags_mask,
acemask = ?write_object_mask
}, cdmi),
RequestBody18 = #{<<"metadata">> => #{<<"cdmi_acl">> => [Ace3, Ace4]}},
RawRequestBody18 = json_utils:encode(RequestBody18),
RequestHeaders18 = [user_1_token_header(Config), ?CONTAINER_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER],
{ok, Code18, _Headers18, _Response18} = do_request(Workers, DirName ++ "?metadata:cdmi_acl", put, RequestHeaders18, RawRequestBody18),
?assertEqual(?HTTP_204_NO_CONTENT, Code18),
{ok, Code19, _Headers19, Response19} = do_request(Workers, filename:join(DirName, "some_file"), put, [user_1_token_header(Config)], []),
ExpRestError = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),
?assertMatch(ExpRestError, {Code19, json_utils:decode(Response19)}).
%% Tests file deletion (cdmi object DELETE): deletes a freshly created
%% file and a group file, expecting 204 and that the object disappears.
delete_file(Config) ->
    Nodes = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    Space = binary_to_list(SpaceName),
    FilePath = filename:join([Space, "toDelete.txt"]),
    GroupFilePath = filename:join([Space, "groupFile"]),
    %% same header list as [user_1_token_header(Config) | [?CDMI_VERSION_HEADER]]
    Headers = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],

    %%----- delete file -------
    {ok, _} = create_file(Config, "/" ++ FilePath),
    ?assert(object_exists(Config, FilePath)),
    {ok, DeleteCode, _RespHeaders1, _RespBody1} =
        do_request(Nodes, FilePath, delete, Headers),
    ?assertEqual(?HTTP_204_NO_CONTENT, DeleteCode),
    ?assert(not object_exists(Config, FilePath)),

    %%----- delete group file -------
    {ok, _} = create_file(Config, GroupFilePath),
    {ok, GroupDeleteCode, _RespHeaders2, _RespBody2} =
        do_request(Nodes, GroupFilePath, delete, Headers),
    ?assertEqual(?HTTP_204_NO_CONTENT, GroupDeleteCode),
    ?assert(not object_exists(Config, GroupFilePath)).
%% Tests dir deletion (cdmi container DELETE): an empty dir, a non-empty
%% dir (its child must be removed as well), and the space root, whose
%% deletion must fail with EPERM and leave it intact.
delete_dir(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    DirName = filename:join([binary_to_list(SpaceName), "toDelete"]) ++ "/",
    ChildDirName = filename:join([binary_to_list(SpaceName), "toDelete", "child"]) ++ "/",

    %%----- delete empty dir -------
    mkdir(Config, DirName),
    ?assert(object_exists(Config, DirName)),
    RequestHeaders1 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code1, _Headers1, _Response1} =
        do_request(Workers, DirName, delete, RequestHeaders1, []),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code1),
    ?assert(not object_exists(Config, DirName)),

    %%----- delete non-empty dir (child is deleted together with parent) -------
    mkdir(Config, DirName),
    ?assert(object_exists(Config, DirName)),
    mkdir(Config, ChildDirName),
    ?assert(object_exists(Config, DirName)),
    RequestHeaders2 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code2, _Headers2, _Response2} =
        do_request(Workers, DirName, delete, RequestHeaders2, []),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),
    ?assert(not object_exists(Config, DirName)),
    ?assert(not object_exists(Config, ChildDirName)),

    %%----- delete root dir -------
    ?assert(object_exists(Config, "/")),
    RequestHeaders3 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    ?assert(object_exists(Config, "/")),
    {ok, Code3, _Headers3, Response3} =
        do_request(Workers, "/", delete, RequestHeaders3, []),
    %% root deletion is rejected with POSIX EPERM mapped to a REST error
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_POSIX(?EPERM)),
    ?assertMatch(ExpRestError, {Code3, json_utils:decode(Response3)}),
    ?assert(object_exists(Config, "/")).
%% Tests file creation (cdmi object PUT). It can be done with a cdmi header
%% (when file data is provided as a cdmi-object JSON body) or without it
%% (when the raw request body is treated as file content).
%% Tests file creation (cdmi object PUT) in four variants: utf-8 CDMI body,
%% base64-encoded CDMI body, empty CDMI body, and a plain non-CDMI PUT.
create_file(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    ToCreate = filename:join([binary_to_list(SpaceName), "file1.txt"]),
    ToCreate2 = filename:join([binary_to_list(SpaceName), "file2.txt"]),
    ToCreate5 = filename:join([binary_to_list(SpaceName), "file3.txt"]),
    ToCreate4 = filename:join([binary_to_list(SpaceName), "file4.txt"]),
    FileContent = <<"File content!">>,

    %%-------- create file with cdmi-object content type and plain value --------
    ?assert(not object_exists(Config, ToCreate)),
    RequestHeaders1 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody1 = #{<<"value">> => FileContent},
    RawRequestBody1 = json_utils:encode((RequestBody1)),
    {ok, Code1, _Headers1, Response1} = do_request(Workers, ToCreate, put, RequestHeaders1, RawRequestBody1),
    ?assertEqual(?HTTP_201_CREATED, Code1),
    CdmiResponse1 = (json_utils:decode(Response1)),
    ?assertMatch(#{<<"objectType">> := <<"application/cdmi-object">>}, CdmiResponse1),
    ?assertMatch(#{<<"objectName">> := <<"file1.txt">>}, CdmiResponse1),
    SpaceName1 = <<"/", SpaceName/binary, "/">>,
    ?assertMatch(#{<<"parentURI">> := SpaceName1}, CdmiResponse1),
    ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse1),
    Metadata1 = maps:get(<<"metadata">>, CdmiResponse1),
    ?assertNotEqual([], Metadata1),
    ?assert(object_exists(Config, ToCreate)),
    ?assertEqual(FileContent, get_file_content(Config, ToCreate)),

    %%-------- create file with base64-encoded value --------
    ?assert(not object_exists(Config, ToCreate2)),
    RequestHeaders2 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody2 = #{<<"valuetransferencoding">> => <<"base64">>,
        <<"value">> => base64:encode(FileContent)},
    RawRequestBody2 = json_utils:encode((RequestBody2)),
    {ok, Code2, _Headers2, Response2} =
        do_request(Workers, ToCreate2, put, RequestHeaders2, RawRequestBody2),
    ?assertEqual(?HTTP_201_CREATED, Code2),
    CdmiResponse2 = (json_utils:decode(Response2)),
    ?assertMatch(#{<<"objectType">> := <<"application/cdmi-object">>}, CdmiResponse2),
    ?assertMatch(#{<<"objectName">> := <<"file2.txt">>}, CdmiResponse2),
    SpaceName1 = <<"/", SpaceName/binary, "/">>,
    ?assertMatch(#{<<"parentURI">> := SpaceName1}, CdmiResponse2),
    ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse2),
    ?assert(maps:get(<<"metadata">>, CdmiResponse2) =/= <<>>),
    ?assert(object_exists(Config, ToCreate2)),
    %% the server must decode the base64 value before storing it
    ?assertEqual(FileContent, get_file_content(Config, ToCreate2)),

    %%-------- create file with empty body --------
    ?assert(not object_exists(Config, ToCreate4)),
    RequestHeaders4 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code4, _Headers4, _Response4} = do_request(Workers, ToCreate4, put, RequestHeaders4, []),
    ?assertEqual(?HTTP_201_CREATED, Code4),
    ?assert(object_exists(Config, ToCreate4)),
    ?assertEqual(<<>>, get_file_content(Config, ToCreate4)),

    %%-------- create file with non-cdmi (binary) request --------
    ?assert(not object_exists(Config, ToCreate5)),
    RequestHeaders5 = [{?HDR_CONTENT_TYPE, <<"application/binary">>}],
    {ok, Code5, _Headers5, _Response5} =
        do_request(Workers, ToCreate5, put,
            [user_1_token_header(Config) | RequestHeaders5], FileContent),
    ?assertEqual(?HTTP_201_CREATED, Code5),
    ?assert(object_exists(Config, ToCreate5)),
    ?assertEqual(FileContent, get_file_content(Config, ToCreate5)).
%% Tests cdmi object PUT requests (updating content).
%% Tests cdmi object PUT requests that update content: full replace and
%% ranged update via CDMI, full replace and Content-Range updates via
%% plain HTTP, plus an invalid multi-range Content-Range that must fail.
update_file(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    {_SpaceName, _ShortTestDirName, _TestDirName, _TestFileName, FullTestFileName, TestFileContent} =
        create_test_dir_and_file(Config),
    NewValue = <<"New Value!">>,
    UpdatedValue = <<"123 Value!">>,

    %%--- value replace, cdmi ------
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(TestFileContent, get_file_content(Config, FullTestFileName)),
    RequestHeaders1 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody1 = #{<<"value">> => NewValue},
    RawRequestBody1 = json_utils:encode(RequestBody1),
    {ok, Code1, _Headers1, _Response1} = do_request(Workers, FullTestFileName, put, RequestHeaders1, RawRequestBody1),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code1),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(NewValue, get_file_content(Config, FullTestFileName)),

    %%--- value update, cdmi ------
    %% "?value:0-2" writes the (base64-decoded) bytes at offsets 0..2
    UpdateValue = <<"123">>,
    RequestHeaders2 = [?OBJECT_CONTENT_TYPE_HEADER, ?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody2 = #{<<"value">> => base64:encode(UpdateValue)},
    RawRequestBody2 = json_utils:encode(RequestBody2),
    {ok, Code2, _Headers2, _Response2} = do_request(Workers, FullTestFileName ++ "?value:0-2", put, RequestHeaders2, RawRequestBody2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(UpdatedValue, get_file_content(Config, FullTestFileName)),

    %%--- value replace, http ------
    %% a non-CDMI PUT body replaces the whole file content
    RequestBody3 = TestFileContent,
    {ok, Code3, _Headers3, _Response3} =
        do_request(Workers, FullTestFileName, put, [user_1_token_header(Config)], RequestBody3),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code3),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(TestFileContent,
        get_file_content(Config, FullTestFileName)),

    %%--- value update, http -----
    %% UpdateValue is already bound to <<"123">>; this is a match, not a rebinding
    UpdateValue = <<"123">>,
    RequestHeaders4 = [{?HDR_CONTENT_RANGE, <<"bytes 0-2/3">>}],
    {ok, Code4, _Headers4, _Response4} =
        do_request(Workers, FullTestFileName,
            put, [user_1_token_header(Config) | RequestHeaders4], UpdateValue),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code4),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(<<"123t_file_content">>,
        get_file_content(Config, FullTestFileName)),

    %%--- value update2, http -----
    UpdateValue2 = <<"00">>,
    RequestHeaders5 = [{?HDR_CONTENT_RANGE, <<"bytes 3-4/*">>}],
    {ok, Code5, _Headers5, _Response5} =
        do_request(Workers, FullTestFileName,
            put, [user_1_token_header(Config) | RequestHeaders5], UpdateValue2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code5),
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(<<"12300file_content">>,
        get_file_content(Config, FullTestFileName)),

    %%--- value update, multiple ranges (invalid Content-Range) -----
    UpdateValue = <<"123">>,
    RequestHeaders6 = [{?HDR_CONTENT_RANGE, <<"bytes 0-2,3-4/*">>}],
    {ok, Code6, _Headers6, Response6} =
        do_request(Workers, FullTestFileName, put, [user_1_token_header(Config) | RequestHeaders6],
            UpdateValue),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_BAD_DATA(?HDR_CONTENT_RANGE)),
    ?assertMatch(ExpRestError, {Code6, json_utils:decode(Response6)}),
    %% content must remain as after the previous successful update
    ?assert(object_exists(Config, FullTestFileName)),
    ?assertEqual(<<"12300file_content">>,
        get_file_content(Config, FullTestFileName)).
%% Verifies that a request carrying a supported CDMI version header is
%% accepted by the version check (it reaches routing and the nonexistent
%% path yields 404 rather than a version error).
use_supported_cdmi_version(Config) ->
    Nodes = ?config(op_worker_nodes, Config),
    Headers = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, RespCode, _RespHeaders, _RespBody} =
        do_request(Nodes, "/random", get, Headers),
    ?assertEqual(RespCode, ?HTTP_404_NOT_FOUND).
%% Verifies that a request carrying an unsupported CDMI version header is
%% rejected with a bad-version error listing the supported versions.
use_unsupported_cdmi_version(Config) ->
    Nodes = ?config(op_worker_nodes, Config),
    BadVersionHeaders = [{<<"X-CDMI-Specification-Version">>, <<"1.0.2">>}],
    {ok, RespCode, _RespHeaders, RespBody} =
        do_request(Nodes, "/random", get, BadVersionHeaders),
    ExpectedError = rest_test_utils:get_rest_error(?ERROR_BAD_VERSION([<<"1.1.1">>, <<"1.1">>])),
    ?assertMatch(ExpectedError, {RespCode, json_utils:decode(RespBody)}).
%% Tests dir creation (cdmi container PUT); remember that every container
%% URI ends with '/'.
%% Tests dir creation (cdmi container PUT): basic non-CDMI mkdir, mkdir
%% with CDMI headers (response body checked), PUT on an existing dir
%% (204), and mkdir under a missing parent (ENOENT).
create_dir(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    DirName = filename:join([binary_to_list(SpaceName), "toCreate1"]) ++ "/",
    DirName2 = filename:join([binary_to_list(SpaceName), "toCreate2"]) ++ "/",
    MissingParentName = filename:join([binary_to_list(SpaceName), "unknown"]) ++ "/",
    DirWithoutParentName = filename:join(MissingParentName, "dir") ++ "/",

    %%------- basic create (no CDMI headers) --------
    ?assert(not object_exists(Config, DirName)),
    {ok, Code1, _Headers1, _Response1} =
        do_request(Workers, DirName, put, [user_1_token_header(Config)]),
    ?assertEqual(?HTTP_201_CREATED, Code1),
    ?assert(object_exists(Config, DirName)),

    %%------- create with CDMI version and container content type --------
    ?assert(not object_exists(Config, DirName2)),
    RequestHeaders2 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],
    {ok, Code2, _Headers2, Response2} = do_request(Workers, DirName2, put, RequestHeaders2, []),
    ?assertEqual(?HTTP_201_CREATED, Code2),
    CdmiResponse2 = (json_utils:decode(Response2)),
    ?assertMatch(#{<<"objectType">> := <<"application/cdmi-container">>}, CdmiResponse2),
    ?assertMatch(#{<<"objectName">> := <<"toCreate2/">>}, CdmiResponse2),
    SpaceName1 = <<"/", SpaceName/binary, "/">>,
    ?assertMatch(#{<<"parentURI">> := SpaceName1}, CdmiResponse2),
    ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse2),
    %% freshly created dir has no children and non-empty metadata
    ?assertMatch(#{<<"children">> := []}, CdmiResponse2),
    ?assert(maps:get(<<"metadata">>, CdmiResponse2) =/= <<>>),
    ?assert(object_exists(Config, DirName2)),

    %%------- PUT on an already existing dir --------
    ?assert(object_exists(Config, DirName)),
    RequestHeaders3 = [
        user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code3, _Headers3, _Response3} =
        do_request(Workers, DirName, put, RequestHeaders3, []),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code3),
    ?assert(object_exists(Config, DirName)),

    %%------- create dir under a nonexistent parent --------
    ?assert(not object_exists(Config, MissingParentName)),
    RequestHeaders4 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code4, _Headers4, Response4} =
        do_request(Workers, DirWithoutParentName, put, RequestHeaders4, []),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_POSIX(?ENOENT)),
    ?assertMatch(ExpRestError, {Code4, json_utils:decode(Response4)}).
%% Tests objectid-based access (cdmi_objectid/{id}): reads /, a test dir
%% and a test file both by path and by objectid, and asserts both routes
%% return the same answer (modulo atime and, for id-based GETs, parent
%% fields). Restores the ?assertEqual wrappers that were lost during
%% extraction (the argument lists were left dangling without the macro).
objectid(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    {SpaceName, ShortTestDirName, TestDirName, TestFileName, _FullTestFileName, _TestFileContent} =
        create_test_dir_and_file(Config),

    TestDirNameCheck = list_to_binary(ShortTestDirName ++ "/"),
    ShortTestDirNameBin = list_to_binary(ShortTestDirName),
    TestFileNameBin = list_to_binary(TestFileName),

    %%-------- / objectid ---------
    RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code1, Headers1, Response1} = do_request(Workers, "", get, RequestHeaders1, []),
    ?assertEqual(?HTTP_200_OK, Code1),

    RequestHeaders0 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code0, _Headers0, Response0} = do_request(Workers, SpaceName ++ "/", get, RequestHeaders0, []),
    ?assertEqual(?HTTP_200_OK, Code0),
    CdmiResponse0 = json_utils:decode(Response0),
    SpaceRootId = maps:get(<<"objectID">>, CdmiResponse0),

    ?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/cdmi-container">>}, Headers1),
    CdmiResponse1 = json_utils:decode(Response1),
    ?assertMatch(#{<<"objectName">> := <<"/">>}, CdmiResponse1),
    RootId = maps:get(<<"objectID">>, CdmiResponse1, undefined),
    ?assertNotEqual(RootId, undefined),
    ?assert(is_binary(RootId)),
    %% the root container has no parent
    ?assertMatch(#{<<"parentURI">> := <<>>}, CdmiResponse1),
    ?assertEqual(error, maps:find(<<"parentID">>, CdmiResponse1)),
    ?assertMatch(#{<<"capabilitiesURI">> := <<"cdmi_capabilities/container/">>}, CdmiResponse1),

    %%------ /dir1/ objectid ------
    RequestHeaders2 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code2, _Headers2, Response2} = do_request(Workers, TestDirName ++ "/", get, RequestHeaders2, []),
    ?assertEqual(?HTTP_200_OK, Code2),
    CdmiResponse2 = (json_utils:decode(Response2)),
    ?assertMatch(#{<<"objectName">> := TestDirNameCheck}, CdmiResponse2),
    DirId = maps:get(<<"objectID">>, CdmiResponse2, undefined),
    ?assertNotEqual(DirId, undefined),
    ?assert(is_binary(DirId)),
    ParentURI = <<"/", (list_to_binary(SpaceName))/binary, "/">>,
    ?assertMatch(#{<<"parentURI">> := ParentURI}, CdmiResponse2),
    ?assertMatch(#{<<"parentID">> := SpaceRootId}, CdmiResponse2),
    ?assertMatch(#{<<"capabilitiesURI">> := <<"cdmi_capabilities/container/">>}, CdmiResponse2),

    %%--- /dir1/file.txt objectid ---
    RequestHeaders3 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code3, _Headers3, Response3} = do_request(Workers, filename:join(TestDirName, TestFileName), get, RequestHeaders3, []),
    ?assertEqual(?HTTP_200_OK, Code3),
    CdmiResponse3 = json_utils:decode(Response3),
    ?assertMatch(#{<<"objectName">> := TestFileNameBin}, CdmiResponse3),
    FileId = maps:get(<<"objectID">>, CdmiResponse3, undefined),
    ?assertNotEqual(FileId, undefined),
    ?assert(is_binary(FileId)),
    ParentURI1 = <<"/", (list_to_binary(SpaceName))/binary, "/", ShortTestDirNameBin/binary, "/">>,
    ?assertMatch(#{<<"parentURI">> := ParentURI1}, CdmiResponse3),
    ?assertMatch(#{<<"parentID">> := DirId}, CdmiResponse3),
    ?assertMatch(#{<<"capabilitiesURI">> := <<"cdmi_capabilities/dataobject/">>}, CdmiResponse3),

    %%---- get / by objectid -----
    RequestHeaders4 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code4, _Headers4, Response4} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(RootId) ++ "/", get, RequestHeaders4, []),
    ?assertEqual(?HTTP_200_OK, Code4),
    CdmiResponse4 = json_utils:decode(Response4),
    %% atime changes between the two reads, so compare without it
    Meta1 = maps:remove(<<"cdmi_atime">>, maps:get(<<"metadata">>, CdmiResponse1)),
    CdmiResponse1WithoutAtime = maps:put(<<"metadata">>, Meta1, CdmiResponse1),
    Meta4 = maps:remove(<<"cdmi_atime">>, maps:get(<<"metadata">>, CdmiResponse4)),
    CdmiResponse4WithoutAtime = maps:put(<<"metadata">>, Meta4, CdmiResponse4),
    %% should be the same as in 1 (except access time)
    ?assertEqual(CdmiResponse1WithoutAtime, CdmiResponse4WithoutAtime),

    %%--- get /dir1/ by objectid ----
    RequestHeaders5 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code5, _Headers5, Response5} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(DirId) ++ "/", get, RequestHeaders5, []),
    ?assertEqual(?HTTP_200_OK, Code5),
    CdmiResponse5 = json_utils:decode(Response5),
    Meta2 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse2))),
    CdmiResponse2WithoutAtime = maps:put(<<"metadata">>, Meta2, CdmiResponse2),
    Meta5 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse5))),
    CdmiResponse5WithoutAtime = maps:put(<<"metadata">>, Meta5, CdmiResponse5),
    %% should be the same as in 2 (except parent and access time)
    ?assertEqual(
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse2WithoutAtime)),
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse5WithoutAtime))
    ),

    %%--- get /dir1/file.txt by objectid ----
    RequestHeaders6 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    {ok, Code6, _Headers6, Response6} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(DirId) ++ "/" ++ TestFileName, get, RequestHeaders6, []),
    ?assertEqual(?HTTP_200_OK, Code6),
    CdmiResponse6 = (json_utils:decode(Response6)),
    Meta3 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse3))),
    CdmiResponse3WithoutAtime = maps:put(<<"metadata">>, Meta3, CdmiResponse3),
    Meta6 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse6))),
    CdmiResponse6WithoutAtime = maps:put(<<"metadata">>, Meta6, CdmiResponse6),
    %% should be the same as in 3 (except access time)
    ?assertEqual(
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse3WithoutAtime)),
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse6WithoutAtime))
    ),

    {ok, Code7, _Headers7, Response7} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(FileId), get, RequestHeaders6, []),
    ?assertEqual(?HTTP_200_OK, Code7),
    CdmiResponse7 = (json_utils:decode(Response7)),
    Meta7 = maps:remove(<<"cdmi_atime">>, (maps:get(<<"metadata">>, CdmiResponse7))),
    CdmiResponse7WithoutAtime = maps:merge(#{<<"metadata">> => Meta7}, maps:remove(<<"metadata">>, CdmiResponse7)),
    %% should be the same as in 6 (except parent and access time)
    ?assertEqual(
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse6WithoutAtime)),
        maps:remove(<<"parentURI">>, maps:remove(<<"parentID">>, CdmiResponse7WithoutAtime))
    ),

    %%---- unauthorized access to / by objectid -------
    RequestHeaders8 = [?CDMI_VERSION_HEADER],
    {ok, Code8, _, Response8} = do_request(Workers, "cdmi_objectid/" ++ binary_to_list(RootId) ++ "/", get, RequestHeaders8, []),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_UNAUTHORIZED),
    ?assertMatch(ExpRestError, {Code8, json_utils:decode(Response8)}).
%% Tests the capabilities resources: root, container and dataobject
%% capabilities, each also reachable via its well-known objectid.
capabilities(Config) ->
    Workers = ?config(op_worker_nodes, Config),

    %%--- root capabilities ---
    RequestHeaders8 = [?CDMI_VERSION_HEADER],
    {ok, Code8, Headers8, Response8} =
        do_request(Workers, "cdmi_capabilities/", get, RequestHeaders8, []),
    ?assertEqual(?HTTP_200_OK, Code8),
    ?assertMatch(#{?HDR_CONTENT_TYPE := <<"application/cdmi-capability">>}, Headers8),
    CdmiResponse8 = (json_utils:decode(Response8)),
    ?assertMatch(#{<<"objectID">> := ?ROOT_CAPABILITY_ID}, CdmiResponse8),
    ?assertMatch(#{<<"objectName">> := <<?ROOT_CAPABILITY_PATH>>}, CdmiResponse8),
    ?assertMatch(#{<<"childrenrange">> := <<"0-1">>}, CdmiResponse8),
    ?assertMatch(#{<<"children">> := [<<"container/">>, <<"dataobject/">>]}, CdmiResponse8),
    Capabilities = maps:get(<<"capabilities">>, CdmiResponse8),
    ?assertEqual(?ROOT_CAPABILITY_MAP, Capabilities),

    %%--- container capabilities ---
    RequestHeaders9 = [?CDMI_VERSION_HEADER],
    {ok, Code9, _Headers9, Response9} =
        do_request(Workers, "cdmi_capabilities/container/", get, RequestHeaders9, []),
    ?assertEqual(?HTTP_200_OK, Code9),
    %% the objectid route must return exactly the same response
    ?assertMatch({ok, Code9, _, Response9}, do_request(Workers, "cdmi_objectid/" ++ binary_to_list(?CONTAINER_CAPABILITY_ID) ++ "/", get, RequestHeaders9, [])),
    CdmiResponse9 = (json_utils:decode(Response9)),
    ?assertMatch(#{<<"parentURI">> := <<?ROOT_CAPABILITY_PATH>>}, CdmiResponse9),
    ?assertMatch(#{<<"parentID">> := ?ROOT_CAPABILITY_ID}, CdmiResponse9),
    ?assertMatch(#{<<"objectID">> := ?CONTAINER_CAPABILITY_ID}, CdmiResponse9),
    ?assertMatch(#{<<"objectName">> := <<"container/">>}, CdmiResponse9),
    Capabilities2 = maps:get(<<"capabilities">>, CdmiResponse9),
    ?assertEqual(?CONTAINER_CAPABILITY_MAP, Capabilities2),

    %%--- dataobject capabilities ---
    RequestHeaders10 = [?CDMI_VERSION_HEADER],
    {ok, Code10, _Headers10, Response10} =
        do_request(Workers, "cdmi_capabilities/dataobject/", get, RequestHeaders10, []),
    ?assertEqual(?HTTP_200_OK, Code10),
    %% the objectid route must return exactly the same response
    ?assertMatch({ok, Code10, _, Response10}, do_request(Workers, "cdmi_objectid/" ++ binary_to_list(?DATAOBJECT_CAPABILITY_ID) ++ "/", get, RequestHeaders10, [])),
    CdmiResponse10 = (json_utils:decode(Response10)),
    ?assertMatch(#{<<"parentURI">> := <<?ROOT_CAPABILITY_PATH>>}, CdmiResponse10),
    ?assertMatch(#{<<"parentID">> := ?ROOT_CAPABILITY_ID}, CdmiResponse10),
    ?assertMatch(#{<<"objectID">> := ?DATAOBJECT_CAPABILITY_ID}, CdmiResponse10),
    ?assertMatch(#{<<"objectName">> := <<"dataobject/">>}, CdmiResponse10),
    Capabilities3 = maps:get(<<"capabilities">>, CdmiResponse10),
    ?assertEqual(?DATAOBJECT_CAPABILITY_MAP, Capabilities3).
%% Tests that a redirection code is returned when the trailing '/' in a
%% path is missing (for dirs) or superfluous (for files).
%% Tests redirection on trailing-slash mismatches: a dir requested
%% without '/', a dir without '/' but with a query string, and a file
%% requested with '/' all get 302 plus a corrected Location header.
moved_permanently(Config) ->
    [WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileName = filename:join([binary_to_list(SpaceName), "somedir", "somefile.txt"]),
    DirNameWithoutSlash = filename:join([binary_to_list(SpaceName), "somedir"]),
    DirName = DirNameWithoutSlash ++ "/",
    FileNameWithSlash = FileName ++ "/",

    mkdir(Config, DirName),
    ?assert(object_exists(Config, DirName)),
    create_file(Config, FileName),
    ?assert(object_exists(Config, FileName)),
    CDMIEndpoint = cdmi_test_utils:cdmi_endpoint(WorkerP2),

    %%--------- dir requested without trailing '/' ----------
    RequestHeaders1 = [
        ?CONTAINER_CONTENT_TYPE_HEADER,
        ?CDMI_VERSION_HEADER,
        user_1_token_header(Config)
    ],
    Location1 = list_to_binary(CDMIEndpoint ++ DirName),
    {ok, Code1, Headers1, _Response1} =
        do_request(WorkerP2, DirNameWithoutSlash, get, RequestHeaders1, []),
    ?assertEqual(?HTTP_302_FOUND, Code1),
    ?assertMatch(#{?HDR_LOCATION := Location1}, Headers1),

    %%--------- dir without '/' plus query string (must be preserved) ----------
    RequestHeaders2 = [
        ?CONTAINER_CONTENT_TYPE_HEADER,
        ?CDMI_VERSION_HEADER,
        user_1_token_header(Config)
    ],
    Location2 = list_to_binary(CDMIEndpoint ++ DirName ++ "?example_qs=1"),
    {ok, Code2, Headers2, _Response2} =
        do_request(WorkerP2, DirNameWithoutSlash ++ "?example_qs=1", get, RequestHeaders2, []),
    ?assertEqual(?HTTP_302_FOUND, Code2),
    ?assertMatch(#{?HDR_LOCATION := Location2}, Headers2),

    %%--------- file requested with superfluous trailing '/' ----------
    RequestHeaders3 = [
        ?OBJECT_CONTENT_TYPE_HEADER,
        ?CDMI_VERSION_HEADER,
        user_1_token_header(Config)
    ],
    Location3 = list_to_binary(CDMIEndpoint ++ FileName),
    {ok, Code3, Headers3, _Response3} =
        do_request(WorkerP2, FileNameWithSlash, get, RequestHeaders3, []),
    ?assertEqual(?HTTP_302_FOUND, Code3),
    ?assertMatch(#{?HDR_LOCATION := Location3}, Headers3).
%% Tests that PUT requests without a content-type header are still
%% accepted, both for a file and for a dir.
request_format_check(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileToCreate = filename:join([binary_to_list(SpaceName), "file.txt"]),
    DirToCreate = filename:join([binary_to_list(SpaceName), "dir"]) ++ "/",
    FileContent = <<"File content!">>,

    %%---- create file, no content-type header --
    RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody1 = #{<<"value">> => FileContent},
    RawRequestBody1 = json_utils:encode(RequestBody1),
    {ok, Code1, _Headers1, _Response1} = do_request(Workers, FileToCreate, put, RequestHeaders1, RawRequestBody1),
    ?assertEqual(?HTTP_201_CREATED, Code1),

    %%---- create dir, missing content-type --
    RequestHeaders3 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
    RequestBody3 = #{<<"metadata">> => <<"">>},
    RawRequestBody3 = json_utils:encode(RequestBody3),
    {ok, Code3, _Headers3, _Response3} = do_request(Workers, DirToCreate, put, RequestHeaders3, RawRequestBody3),
    ?assertEqual(?HTTP_201_CREATED, Code3).
%% Tests mimetype and valuetransferencoding properties; they are part of
%% cdmi-object and cdmi-container.
mimetype_and_encoding(Config) ->
Workers = ?config(op_worker_nodes, Config),
{_SpaceName, _ShortTestDirName, TestDirName, TestFileName, _FullTestFileName, _TestFileContent} =
create_test_dir_and_file(Config),
[{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    %% get mimetype and valuetransferencoding of a non-cdmi file
RequestHeaders1 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code1, _Headers1, Response1} = do_request(Workers, filename:join(TestDirName, TestFileName) ++ "?mimetype;valuetransferencoding", get, RequestHeaders1, []),
?assertEqual(?HTTP_200_OK, Code1),
CdmiResponse1 = (json_utils:decode(Response1)),
?assertMatch(#{<<"mimetype">> := <<"application/octet-stream">>}, CdmiResponse1),
?assertMatch(#{<<"valuetransferencoding">> := <<"base64">>}, CdmiResponse1),
RequestHeaders2 = [?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER, user_1_token_header(Config)],
RawBody2 = json_utils:encode(#{<<"valuetransferencoding">> => <<"utf-8">>,
<<"mimetype">> => <<"application/binary">>}),
{ok, Code2, _Headers2, _Response2} = do_request(Workers, filename:join(TestDirName, TestFileName), put, RequestHeaders2, RawBody2),
?assertEqual(?HTTP_204_NO_CONTENT, Code2),
{ok, Code3, _Headers3, Response3} = do_request(Workers, filename:join(TestDirName, TestFileName) ++ "?mimetype;valuetransferencoding", get, RequestHeaders2, []),
?assertEqual(?HTTP_200_OK, Code3),
CdmiResponse3 = (json_utils:decode(Response3)),
?assertMatch(#{<<"mimetype">> := <<"application/binary">>}, CdmiResponse3),
?assertMatch(#{<<"valuetransferencoding">> := <<"utf-8">>}, CdmiResponse3),
FileName4 = filename:join([binary_to_list(SpaceName), "mime_file.txt"]),
FileContent4 = <<"some content">>,
RequestHeaders4 = [?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER, user_1_token_header(Config)],
RawBody4 = json_utils:encode(#{<<"valuetransferencoding">> => <<"utf-8">>,
<<"mimetype">> => <<"text/plain">>,
<<"value">> => FileContent4}),
{ok, Code4, _Headers4, Response4} = do_request(Workers, FileName4, put, RequestHeaders4, RawBody4),
?assertEqual(?HTTP_201_CREATED, Code4),
CdmiResponse4 = (json_utils:decode(Response4)),
?assertMatch(#{<<"mimetype">> := <<"text/plain">>}, CdmiResponse4),
RequestHeaders5 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code5, _Headers5, Response5} = do_request(Workers, FileName4 ++ "?value;mimetype;valuetransferencoding", get, RequestHeaders5, []),
?assertEqual(?HTTP_200_OK, Code5),
CdmiResponse5 = (json_utils:decode(Response5)),
?assertMatch(#{<<"mimetype">> := <<"text/plain">>}, CdmiResponse5),
TODO VFS-7376 what do we return here if file contains valid utf-8 string and we read byte range ?
?assertMatch(#{<<"value">> := FileContent4}, CdmiResponse5),
FileName6 = filename:join([binary_to_list(SpaceName), "mime_file_noncdmi.txt"]),
FileContent6 = <<"some content">>,
RequestHeaders6 = [{?HDR_CONTENT_TYPE, <<"text/plain; charset=utf-8">>}, user_1_token_header(Config)],
{ok, Code6, _Headers6, _Response6} = do_request(Workers, FileName6, put, RequestHeaders6, FileContent6),
?assertEqual(?HTTP_201_CREATED, Code6),
RequestHeaders7 = [?CDMI_VERSION_HEADER, user_1_token_header(Config)],
{ok, Code7, _Headers7, Response7} = do_request(Workers, FileName6 ++ "?value;mimetype;valuetransferencoding", get, RequestHeaders7, []),
?assertEqual(?HTTP_200_OK, Code7),
CdmiResponse7 = (json_utils:decode(Response7)),
?assertMatch(#{<<"mimetype">> := <<"text/plain">>}, CdmiResponse7),
?assertMatch(#{<<"valuetransferencoding">> := <<"utf-8">>}, CdmiResponse7),
?assertMatch(#{<<"value">> := FileContent6}, CdmiResponse7).
%% Tests reading and writing at offsets beyond the current file size, and an
%% out-of-range childrenrange request on a container.
out_of_range(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    {_SpaceName, _ShortTestDirName, TestDirName, _TestFileName, _FullTestFileName, _TestFileContent} =
        create_test_dir_and_file(Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),

    FileName = filename:join([binary_to_list(SpaceName), "random_range_file.txt"]),
    {ok, _} = create_file(Config, FileName),

    %%------ reading out of range -------- (an empty file yields empty value)
    ?assertEqual(<<>>, get_file_content(Config, FileName)),
    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    RequestBody1 = json_utils:encode(#{<<"value">> => <<"data">>}),
    {ok, Code1, _Headers1, Response1} = do_request(Workers, FileName ++ "?value:0-3", get, RequestHeaders1, RequestBody1),
    ?assertEqual(?HTTP_200_OK, Code1),
    CdmiResponse1 = (json_utils:decode(Response1)),
    ?assertMatch(#{<<"value">> := <<>>}, CdmiResponse1),

    %%------ writing at end -------- (extend file)
    ?assertEqual(<<>>, get_file_content(Config, FileName)),
    RequestHeaders2 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody2 = json_utils:encode(#{<<"value">> => base64:encode(<<"data">>)}),
    {ok, Code2, _Headers2, _Response2} = do_request(Workers, FileName ++ "?value:0-3", put, RequestHeaders2, RequestBody2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),
    ?assertEqual(<<"data">>, get_file_content(Config, FileName)),

    %%------ writing at random -------- (should return zero bytes in any gaps)
    %% {ok, Code3, _Headers3, _Response3} = do_request(Workers, FileName ++ "?value:10-13", put, RequestHeaders2, RequestBody2),
    %% ?assertEqual(?HTTP_204_NO_CONTENT, Code3),

    %%----- random childrange ------ (fail)
    {ok, Code4, _Headers4, Response4} = do_request(Workers, TestDirName ++ "/?children:100-132", get, RequestHeaders2, []),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_BAD_DATA(<<"childrenrange">>)),
    ?assertMatch(ExpRestError, {Code4, json_utils:decode(Response4)}).
%% Verifies that a CDMI PUT specifying both "move" and "copy" at the same time
%% is rejected as malformed, and leaves the source file untouched.
move_copy_conflict(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileName = filename:join([binary_to_list(SpaceName), "move_test_file.txt"]),
    FileUri = list_to_binary(filename:join("/", FileName)),
    FileData = <<"data">>,
    create_file(Config, FileName),
    write_to_file(Config, FileName, FileData, 0),
    NewMoveFileName = "new_move_test_file",

    %%--- conflicting mv/cpy ------- (we cannot move and copy at the same time)
    ?assertEqual(FileData, get_file_content(Config, FileName)),
    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody1 = json_utils:encode(#{<<"move">> => FileUri,
        <<"copy">> => FileUri}),
    {ok, Code1, _Headers1, Response1} = do_request(Workers, NewMoveFileName, put, RequestHeaders1, RequestBody1),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_MALFORMED_DATA),
    ?assertMatch(ExpRestError, {Code1, json_utils:decode(Response1)}),
    %% the source file must remain intact after the failed request
    ?assertEqual(FileData, get_file_content(Config, FileName)).
%% Tests moving a file and a directory via the CDMI "move" field: the source
%% must disappear, the target must appear, and file content must follow.
move(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileName = filename:join([binary_to_list(SpaceName), "move_test_file.txt"]),
    DirName = filename:join([binary_to_list(SpaceName), "move_test_dir"]) ++ "/",
    FileData = <<"data">>,
    create_file(Config, FileName),
    mkdir(Config, DirName),
    write_to_file(Config, FileName, FileData, 0),
    NewMoveFileName = filename:join([binary_to_list(SpaceName), "new_move_test_file"]),
    NewMoveDirName = filename:join([binary_to_list(SpaceName), "new_move_test_dir"]) ++ "/",

    %% move the directory (container content-type, trailing slash)
    ?assert(object_exists(Config, DirName)),
    ?assert(not object_exists(Config, NewMoveDirName)),
    RequestHeaders2 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],
    RequestBody2 = json_utils:encode(#{<<"move">> => list_to_binary(DirName)}),
    ?assertMatch({ok, ?HTTP_201_CREATED, _Headers2, _Response2}, do_request(Workers, NewMoveDirName, put, RequestHeaders2, RequestBody2)),
    ?assert(not object_exists(Config, DirName)),
    ?assert(object_exists(Config, NewMoveDirName)),

    %% move the file (object content-type); content travels with it
    ?assert(object_exists(Config, FileName)),
    ?assert(not object_exists(Config, NewMoveFileName)),
    ?assertEqual(FileData, get_file_content(Config, FileName)),
    RequestHeaders3 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody3 = json_utils:encode(#{<<"move">> => list_to_binary(FileName)}),
    ?assertMatch({ok, _Code3, _Headers3, _Response3}, do_request(Workers, NewMoveFileName, put, RequestHeaders3, RequestBody3)),
    ?assert(not object_exists(Config, FileName)),
    ?assert(object_exists(Config, NewMoveFileName)),
    ?assertEqual(FileData, get_file_content(Config, NewMoveFileName)).
%% Tests copying a file and a directory tree via the CDMI "copy" field.
%% The copy must carry content, JSON metadata, xattrs and ACL, while the
%% source must stay intact.
copy(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),

    %%---------- file cp ----------- (copy file, with xattrs and acl)
    FileName2 = filename:join([binary_to_list(SpaceName), "copy_test_file.txt"]),
    UserId1 = ?config({user_id, <<"user1">>}, Config),
    UserName1 = ?config({user_name, <<"user1">>}, Config),
    create_file(Config, FileName2),
    FileData2 = <<"data">>,
    FileAcl = [#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?all_object_perms_mask
    }],
    JsonMetadata = #{<<"a">> => <<"b">>, <<"c">> => 2, <<"d">> => []},
    Xattrs = [#xattr{name = <<"key1">>, value = <<"value1">>}, #xattr{name = <<"key2">>, value = <<"value2">>}],
    ok = set_acl(Config, FileName2, FileAcl),
    ok = set_json_metadata(Config, FileName2, JsonMetadata),
    ok = add_xattrs(Config, FileName2, Xattrs),
    {ok, _} = write_to_file(Config, FileName2, FileData2, 0),

    NewFileName2 = filename:join([binary_to_list(SpaceName), "copy_test_file2.txt"]),
    ?assert(object_exists(Config, FileName2)),
    ?assert(not object_exists(Config, NewFileName2)),
    ?assertEqual(FileData2, get_file_content(Config, FileName2)),
    ?assertEqual({ok, FileAcl}, get_acl(Config, FileName2)),

    %% copy file using CDMI
    RequestHeaders4 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody4 = json_utils:encode(#{<<"copy">> => list_to_binary(FileName2)}),
    {ok, Code4, _Headers4, _Response4} = do_request(Workers, NewFileName2, put, RequestHeaders4, RequestBody4),
    ?assertEqual(?HTTP_201_CREATED, Code4),

    %% source must still exist; the copy carries content, metadata and acl
    ?assert(object_exists(Config, FileName2)),
    ?assert(object_exists(Config, NewFileName2)),
    ?assertEqual(FileData2, get_file_content(Config, NewFileName2)),
    ?assertEqual({ok, JsonMetadata}, get_json_metadata(Config, NewFileName2)),
    ?assertEqual(Xattrs ++ [#xattr{name = ?JSON_METADATA_KEY, value = JsonMetadata}],
        get_xattrs(Config, NewFileName2)),
    ?assertEqual({ok, FileAcl}, get_acl(Config, NewFileName2)),

    %%---------- dir cp ----------- (copy a directory tree, with xattrs and acl)
    DirName2 = filename:join([binary_to_list(SpaceName), "copy_dir"]) ++ "/",
    NewDirName2 = filename:join([binary_to_list(SpaceName), "new_copy_dir"]) ++ "/",
    DirAcl = [#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?all_container_perms_mask
    }],
    mkdir(Config, DirName2),
    ?assert(object_exists(Config, DirName2)),
    set_acl(Config, DirName2, DirAcl),
    add_xattrs(Config, DirName2, Xattrs),
    mkdir(Config, filename:join(DirName2, "dir1")),
    mkdir(Config, filename:join(DirName2, "dir2")),
    create_file(Config, filename:join([DirName2, "dir1", "1"])),
    create_file(Config, filename:join([DirName2, "dir1", "2"])),
    create_file(Config, filename:join(DirName2, "3")),

    ?assert(object_exists(Config, DirName2)),
    ?assert(object_exists(Config, filename:join(DirName2, "dir1"))),
    ?assert(object_exists(Config, filename:join(DirName2, "dir2"))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "1"]))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "2"]))),
    ?assert(object_exists(Config, filename:join(DirName2, "3"))),
    ?assert(not object_exists(Config, NewDirName2)),

    %% copy dir using CDMI
    RequestHeaders5 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],
    RequestBody5 = json_utils:encode(#{<<"copy">> => list_to_binary(DirName2)}),
    {ok, Code5, _Headers5, _Response5} = do_request(Workers, NewDirName2, put, RequestHeaders5, RequestBody5),
    ?assertEqual(?HTTP_201_CREATED, Code5),

    %% source tree must be intact and the whole tree copied with xattrs/acl
    ?assert(object_exists(Config, DirName2)),
    ?assert(object_exists(Config, filename:join(DirName2, "dir1"))),
    ?assert(object_exists(Config, filename:join(DirName2, "dir2"))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "1"]))),
    ?assert(object_exists(Config, filename:join([DirName2, "dir1", "2"]))),
    ?assert(object_exists(Config, filename:join(DirName2, "3"))),
    ?assert(object_exists(Config, NewDirName2)),
    ?assertEqual(Xattrs, get_xattrs(Config, NewDirName2)),
    ?assertEqual({ok, DirAcl}, get_acl(Config, NewDirName2)),
    ?assert(object_exists(Config, filename:join(NewDirName2, "dir1"))),
    ?assert(object_exists(Config, filename:join(NewDirName2, "dir2"))),
    ?assert(object_exists(Config, filename:join([NewDirName2, "dir1", "1"]))),
    ?assert(object_exists(Config, filename:join([NewDirName2, "dir1", "2"]))),
    ?assert(object_exists(Config, filename:join(NewDirName2, "3"))).
%% Tests CDMI partial upload (X-CDMI-Partial) both through CDMI requests with
%% value byte-ranges and through non-CDMI requests with Content-Range headers;
%% completionStatus must be "Processing" until the final chunk, then "Complete".
partial_upload(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    FileName = filename:join([binary_to_list(SpaceName), "partial.txt"]),
    FileName2 = filename:join([binary_to_list(SpaceName), "partial2.txt"]),
    Chunk1 = <<"some">>,
    Chunk2 = <<"_">>,
    Chunk3 = <<"value">>,

    %%------ request partial upload (CDMI) ------
    ?assert(not object_exists(Config, FileName)),
    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER, {"X-CDMI-Partial", "true"}],
    RequestBody1 = json_utils:encode(#{<<"value">> => Chunk1}),
    {ok, Code1, _Headers1, Response1} = do_request(Workers, FileName, put, RequestHeaders1, RequestBody1),
    ?assertEqual(?HTTP_201_CREATED, Code1),
    CdmiResponse1 = (json_utils:decode(Response1)),
    ?assertMatch(#{<<"completionStatus">> := <<"Processing">>}, CdmiResponse1),

    %% upload second chunk of file
    RequestBody2 = json_utils:encode(#{<<"value">> => base64:encode(Chunk2)}),
    {ok, Code2, _Headers2, _Response2} = do_request(Workers, FileName ++ "?value:4-4", put, RequestHeaders1, RequestBody2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code2),

    %% upload third chunk of file (without the partial header - finalizes)
    RequestHeaders3 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    RequestBody3 = json_utils:encode(#{<<"value">> => base64:encode(Chunk3)}),
    {ok, Code3, _Headers3, _Response3} = do_request(Workers, FileName ++ "?value:5-9", put, RequestHeaders3, RequestBody3),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code3),

    RequestHeaders4 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    %% TODO Verify once after
    CheckAllChunks = fun() ->
        {ok, Code4, _Headers4, Response4} = do_request(Workers, FileName, get, RequestHeaders4, []),
        ?assertEqual(?HTTP_200_OK, Code4),
        CdmiResponse4 = (json_utils:decode(Response4)),
        ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse4),
        ?assertMatch(#{<<"valuetransferencoding">> := <<"utf-8">>}, CdmiResponse4),
        maps:get(<<"value">>, CdmiResponse4)
    end,
    Chunks123 = <<Chunk1/binary, Chunk2/binary, Chunk3/binary>>,
    %% the read is retried (2 attempts) as the result may not be
    %% immediately visible
    ?assertMatch(Chunks123, CheckAllChunks(), 2),

    %%------ request partial upload (non-CDMI) ------
    ?assert(not object_exists(Config, FileName2)),
    RequestHeaders5 = [user_1_token_header(Config), {<<"X-CDMI-Partial">>, <<"true">>}],
    {ok, Code5, _Headers5, _Response5} = do_request(Workers, FileName2, put, RequestHeaders5, Chunk1),
    ?assertEqual(?HTTP_201_CREATED, Code5),
    {ok, Code5_1, _Headers5_1, Response5_1} = do_request(Workers, FileName2 ++ "?completionStatus", get, RequestHeaders4, Chunk1),
    CdmiResponse5_1 = (json_utils:decode(Response5_1)),
    ?assertEqual(?HTTP_200_OK, Code5_1),
    ?assertMatch(#{<<"completionStatus">> := <<"Processing">>}, CdmiResponse5_1),

    %% upload second chunk of file
    RequestHeaders6 = [user_1_token_header(Config), {?HDR_CONTENT_RANGE, <<"bytes 4-4/10">>}, {<<"X-CDMI-Partial">>, <<"true">>}],
    {ok, Code6, _Headers6, _Response6} = do_request(Workers, FileName2, put, RequestHeaders6, Chunk2),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code6),

    %% upload third chunk of file (X-CDMI-Partial: false - finalizes)
    RequestHeaders7 = [user_1_token_header(Config), {?HDR_CONTENT_RANGE, <<"bytes 5-9/10">>}, {<<"X-CDMI-Partial">>, <<"false">>}],
    {ok, Code7, _Headers7, _Response7} = do_request(Workers, FileName2, put, RequestHeaders7, Chunk3),
    ?assertEqual(?HTTP_204_NO_CONTENT, Code7),

    RequestHeaders8 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER],
    %% TODO Verify once after
    CheckAllChunks2 = fun() ->
        {ok, Code8, _Headers8, Response8} = do_request(Workers, FileName2, get, RequestHeaders8, []),
        ?assertEqual(?HTTP_200_OK, Code8),
        CdmiResponse8 = (json_utils:decode(Response8)),
        ?assertMatch(#{<<"completionStatus">> := <<"Complete">>}, CdmiResponse8),
        base64:decode(maps:get(<<"value">>, CdmiResponse8))
    end,
    ?assertMatch(Chunks123, CheckAllChunks2(), 2).
%% Tests that CDMI ACLs set through the cdmi_acl metadata key are enforced for
%% read, write and delete operations on files and directories, for both CDMI
%% and non-CDMI requests as well as logical file operations.
acl(Config) ->
    [_WorkerP2, WorkerP1] = Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    Filename1 = filename:join([binary_to_list(SpaceName), "acl_test_file1"]),
    Dirname1 = filename:join([binary_to_list(SpaceName), "acl_test_dir1"]) ++ "/",
    UserId1 = ?config({user_id, <<"user1">>}, Config),
    UserName1 = ?config({user_name, <<"user1">>}, Config),
    Identifier1 = <<UserName1/binary, "#", UserId1/binary>>,

    %% ACE definitions (both mask-based and verbose forms) used below
    Read = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?read_all_object_mask
    }, cdmi),
    ReadFull = #{
        <<"acetype">> => ?allow,
        <<"identifier">> => Identifier1,
        <<"aceflags">> => ?no_flags,
        <<"acemask">> => <<
            ?read_object/binary, ",",
            ?read_metadata/binary, ",",
            ?read_attributes/binary, ",",
            ?read_acl/binary
        >>
    },
    Write = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?write_all_object_mask
    }, cdmi),
    ReadWriteVerbose = #{
        <<"acetype">> => ?allow,
        <<"identifier">> => Identifier1,
        <<"aceflags">> => ?no_flags,
        <<"acemask">> => <<
            ?read_object/binary, ",",
            ?read_metadata/binary, ",",
            ?read_attributes/binary, ",",
            ?read_acl/binary, ",",
            ?write_object/binary, ",",
            ?write_metadata/binary, ",",
            ?write_attributes/binary, ",",
            ?delete/binary, ",",
            ?write_acl/binary
        >>
    },
    WriteAcl = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?write_acl_mask
    }, cdmi),
    Delete = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?delete_mask
    }, cdmi),
    MetadataAclReadFull = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [ReadFull, WriteAcl]}}),
    MetadataAclDelete = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [Delete]}}),
    MetadataAclWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [Write]}}),
    MetadataAclReadWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [Write, Read]}}),
    MetadataAclReadWriteFull = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [ReadWriteVerbose]}}),

    %%---- file tests ----
    ?assert(not object_exists(Config, Filename1)),
    create_file(Config, filename:join("/", Filename1)),
    write_to_file(Config, Filename1, <<"data">>, 0),
    EaccesError = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),

    %% set acl to 'write' and test cdmi/non-cdmi get request (should return 403 forbidden)
    RequestHeaders1 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?OBJECT_CONTENT_TYPE_HEADER],
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclWrite),
    {ok, Code1, _, Response1} = do_request(Workers, Filename1, get, RequestHeaders1, []),
    ?assertMatch(EaccesError, {Code1, json_utils:decode(Response1)}),
    {ok, Code2, _, Response2} = do_request(Workers, Filename1, get, [user_1_token_header(Config)], []),
    ?assertMatch(EaccesError, {Code2, json_utils:decode(Response2)}),
    ?assertEqual({error, ?EACCES}, open_file(WorkerP1, Config, Filename1, read)),

    %% set acl to 'read&write' and test the same requests (should succeed)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclReadWriteFull),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Filename1, get, RequestHeaders1, []),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Filename1, get, [user_1_token_header(Config)], []),

    %% writes via cdmi, logical files and non-cdmi should all succeed
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclReadWrite),
    RequestBody4 = json_utils:encode(#{<<"value">> => <<"new_data">>}),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, RequestBody4),
    ?assertEqual(<<"new_data">>, get_file_content(Config, Filename1)),
    write_to_file(Config, Filename1, <<"1">>, 8),
    ?assertEqual(<<"new_data1">>, get_file_content(Config, Filename1)),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, [user_1_token_header(Config)], <<"new_data2">>),
    ?assertEqual(<<"new_data2">>, get_file_content(Config, Filename1)),

    %% set acl to 'read' and test cdmi/non-cdmi put request (should return 403 forbidden)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclReadFull),
    RequestBody6 = json_utils:encode(#{<<"value">> => <<"new_data3">>}),
    {ok, Code3, _, Response3} = do_request(Workers, Filename1, put, RequestHeaders1, RequestBody6),
    ?assertMatch(EaccesError, {Code3, json_utils:decode(Response3)}),
    {ok, Code4, _, Response4} = do_request(Workers, Filename1, put, [user_1_token_header(Config)], <<"new_data4">>),
    ?assertMatch(EaccesError, {Code4, json_utils:decode(Response4)}),
    ?assertEqual(<<"new_data2">>, get_file_content(Config, Filename1)),
    ?assertEqual({error, ?EACCES}, open_file(WorkerP1, Config, Filename1, write)),
    ?assertEqual(<<"new_data2">>, get_file_content(Config, Filename1)),

    %% set acl to 'delete' and remove the file
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, put, RequestHeaders1, MetadataAclDelete),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Filename1, delete, [user_1_token_header(Config)], []),
    ?assert(not object_exists(Config, Filename1)),

    %%---- dir tests ----
    ?assert(not object_exists(Config, Dirname1)),
    mkdir(Config, filename:join("/", Dirname1)),
    File1 = filename:join(Dirname1, "1"),
    File2 = filename:join(Dirname1, "2"),
    File3 = filename:join(Dirname1, "3"),
    File4 = filename:join(Dirname1, "4"),
    DirRead = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?read_all_container_mask bor ?traverse_container_mask
    }, cdmi),
    DirWrite = ace:to_json(#access_control_entity{
        acetype = ?allow_mask,
        identifier = UserId1,
        name = UserName1,
        aceflags = ?no_flags_mask,
        acemask = ?write_all_container_mask bor ?traverse_container_mask
    }, cdmi),
    DirMetadataAclReadWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [DirWrite, DirRead]}}),
    DirMetadataAclRead = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [DirRead, WriteAcl]}}),
    DirMetadataAclWrite = json_utils:encode(#{<<"metadata">> => #{<<"cdmi_acl">> => [DirWrite]}}),

    %% set acl to 'read&write' - listing, creating and deleting should succeed
    RequestHeaders2 = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, ?CONTAINER_CONTENT_TYPE_HEADER],
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Dirname1, put, RequestHeaders2, DirMetadataAclReadWrite),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Dirname1, get, RequestHeaders2, []),
    {ok, ?HTTP_201_CREATED, _, _} = do_request(Workers, File1, put, [user_1_token_header(Config)], []),
    ?assert(object_exists(Config, File1)),
    {ok, ?HTTP_201_CREATED, _, _} = do_request(Workers, File2, put, RequestHeaders1, <<"{\"value\":\"val\"}">>),
    ?assert(object_exists(Config, File2)),
    create_file(Config, File3),
    ?assert(object_exists(Config, File3)),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, File1, delete, [user_1_token_header(Config)], []),
    ?assert(not object_exists(Config, File1)),
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, File2, delete, [user_1_token_header(Config)], []),
    ?assert(not object_exists(Config, File2)),

    %% set acl to 'write' and test cdmi get request (should return 403 forbidden)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Dirname1, put, RequestHeaders2, DirMetadataAclWrite),
    {ok, Code5, _, Response5} = do_request(Workers, Dirname1, get, RequestHeaders2, []),
    ?assertMatch(EaccesError, {Code5, json_utils:decode(Response5)}),

    %% set acl to 'read' and test cdmi put request (should return 403 forbidden)
    {ok, ?HTTP_204_NO_CONTENT, _, _} = do_request(Workers, Dirname1, put, RequestHeaders2, DirMetadataAclRead),
    {ok, ?HTTP_200_OK, _, _} = do_request(Workers, Dirname1, get, RequestHeaders2, []),
    {ok, Code6, _, Response6} = do_request(Workers, Dirname1, put, RequestHeaders2, json_utils:encode(#{<<"metadata">> => #{<<"my_meta">> => <<"value">>}})),
    ?assertMatch(EaccesError, {Code6, json_utils:decode(Response6)}),

    %% create files (should return 403 forbidden)
    {ok, Code7, _, Response7} = do_request(Workers, File1, put, [user_1_token_header(Config)], []),
    ?assertMatch(EaccesError, {Code7, json_utils:decode(Response7)}),
    ?assert(not object_exists(Config, File1)),
    {ok, Code8, _, Response8} = do_request(Workers, File2, put, RequestHeaders1, <<"{\"value\":\"val\"}">>),
    ?assertMatch(EaccesError, {Code8, json_utils:decode(Response8)}),
    ?assert(not object_exists(Config, File2)),
    ?assertEqual({error, ?EACCES}, create_file(Config, File4)),
    ?assert(not object_exists(Config, File4)),

    %% delete files (should return 403 forbidden)
    {ok, Code9, _, Response9} = do_request(Workers, File3, delete, [user_1_token_header(Config)], []),
    ?assertMatch(EaccesError, {Code9, json_utils:decode(Response9)}),
    ?assert(object_exists(Config, File3)).
%% Tests assorted error responses: unauthorized access, malformed create
%% paths, invalid base64 payloads, non-existing resources and EACCES on open.
errors(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    {SpaceName, _ShortTestDirName, TestDirName, _TestFileName, _FullTestFileName, _TestFileContent} =
        create_test_dir_and_file(Config),

    %%---- unauthorized access (no credentials) ----
    {ok, Code1, _Headers1, Response1} =
        do_request(Workers, TestDirName, get, [], []),
    ExpRestError1 = rest_test_utils:get_rest_error(?ERROR_UNAUTHORIZED),
    ?assertMatch(ExpRestError1, {Code1, json_utils:decode(Response1)}),

    %%---- wrong create path ---- (container content-type, object-style path)
    RequestHeaders2 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code2, _Headers2, Response2} =
        do_request(Workers, SpaceName ++ "/test_dir", put, RequestHeaders2, []),
    ExpRestError2 = rest_test_utils:get_rest_error(?ERROR_BAD_VALUE_IDENTIFIER(<<"path">>)),
    ?assertMatch(ExpRestError2, {Code2, json_utils:decode(Response2)}),

    %%---- wrong create path 2 ----- (object content-type, container-style path)
    RequestHeaders3 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],
    {ok, Code3, _Headers3, Response3} =
        do_request(Workers, SpaceName ++ "/test_dir/", put, RequestHeaders3, []),
    ExpRestError3 = rest_test_utils:get_rest_error(?ERROR_BAD_VALUE_IDENTIFIER(<<"path">>)),
    ?assertMatch(ExpRestError3, {Code3, json_utils:decode(Response3)}),

    %%---- invalid base64 value ----
    RequestHeaders4 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],
    RequestBody4 = json_utils:encode(#{<<"valuetransferencoding">> => <<"base64">>,
        <<"value">> => <<"#$%">>}),
    {ok, Code4, _Headers4, Response4} =
        do_request(Workers, SpaceName ++ "/some_file_b64", put, RequestHeaders4, RequestBody4),
    ExpRestError4 = rest_test_utils:get_rest_error(?ERROR_BAD_DATA(<<"base64">>)),
    ?assertMatch(ExpRestError4, {Code4, json_utils:decode(Response4)}),

    %%---- non-existing file ----
    RequestHeaders6 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],
    {ok, Code6, _Headers6, _Response6} =
        do_request(Workers, SpaceName ++ "/nonexistent_file", get, RequestHeaders6),
    ?assertEqual(Code6, ?HTTP_404_NOT_FOUND),

    %%---- non-existing container ----
    RequestHeaders7 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?CONTAINER_CONTENT_TYPE_HEADER
    ],
    {ok, Code7, _Headers7, _Response7} =
        do_request(Workers, SpaceName ++ "/nonexisting_dir/", get, RequestHeaders7),
    ?assertEqual(Code7, ?HTTP_404_NOT_FOUND),

    %%---- open error (non-CDMI get) ----
    File8 = filename:join([SpaceName, "file8"]),
    FileContent8 = <<"File content...">>,
    create_file(Config, File8),
    ?assertEqual(object_exists(Config, File8), true),
    write_to_file(Config, File8, FileContent8, ?FILE_BEGINNING),
    ?assertEqual(get_file_content(Config, File8), FileContent8),
    RequestHeaders8 = [user_1_token_header(Config)],
    mock_opening_file_without_perms(Config),
    {ok, Code8, _Headers8, Response8} =
        do_request(Workers, File8, get, RequestHeaders8),
    unmock_opening_file_without_perms(Config),
    ExpRestError8 = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),
    ?assertMatch(ExpRestError8, {Code8, json_utils:decode(Response8)}),

    %%---- open error (CDMI get) ----
    File9 = filename:join([SpaceName, "file9"]),
    FileContent9 = <<"File content...">>,
    create_file(Config, File9),
    ?assertEqual(object_exists(Config, File9), true),
    write_to_file(Config, File9, FileContent9, ?FILE_BEGINNING),
    ?assertEqual(get_file_content(Config, File9), FileContent9),
    RequestHeaders9 = [
        user_1_token_header(Config),
        ?CDMI_VERSION_HEADER,
        ?OBJECT_CONTENT_TYPE_HEADER
    ],
    mock_opening_file_without_perms(Config),
    {ok, Code9, _Headers9, Response9} =
        do_request(Workers, File9, get, RequestHeaders9),
    unmock_opening_file_without_perms(Config),
    ExpRestError9 = rest_test_utils:get_rest_error(?ERROR_POSIX(?EACCES)),
    ?assertMatch(ExpRestError9, {Code9, json_utils:decode(Response9)}).
%% Sends a GET with a wildcard Accept header and expects 200 OK.
accept_header(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    WildcardAccept = {?HDR_ACCEPT, <<"*/*">>},
    Headers = [user_1_token_header(Config), ?CDMI_VERSION_HEADER, WildcardAccept],
    {ok, Code, _RespHeaders, _RespBody} = do_request(Workers, [], get, Headers, []),
    ?assertEqual(?HTTP_200_OK, Code).
%% Non-CDMI (raw) file creation should succeed even when the CDMI version
%% header is present, with or without an explicit Content-Type.
create_raw_file_with_cdmi_version_header_should_succeed(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpacePrefix = binary_to_list(SpaceName),

    %% without Content-Type
    Result1 = do_request(Workers, SpacePrefix ++ "/file1", put,
        [?CDMI_VERSION_HEADER, user_1_token_header(Config)], <<"data">>),
    ?assertMatch({ok, ?HTTP_201_CREATED, _ResponseHeaders, _Response}, Result1),

    %% with an explicit (non-CDMI) Content-Type
    Result2 = do_request(Workers, SpacePrefix ++ "/file2", put,
        [?CDMI_VERSION_HEADER, user_1_token_header(Config), {?HDR_CONTENT_TYPE, <<"text/plain">>}],
        <<"data2">>),
    ?assertMatch({ok, ?HTTP_201_CREATED, _ResponseHeaders2, _Response2}, Result2).
%% Non-CDMI (raw) directory creation should succeed even when the CDMI
%% version header is present, with or without an explicit Content-Type.
create_raw_dir_with_cdmi_version_header_should_succeed(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    SpacePrefix = binary_to_list(SpaceName),

    %% without Content-Type (and without a body)
    Result1 = do_request(Workers, SpacePrefix ++ "/dir1/", put,
        [?CDMI_VERSION_HEADER, user_1_token_header(Config)]),
    ?assertMatch({ok, ?HTTP_201_CREATED, _ResponseHeaders, _Response}, Result1),

    %% with an explicit (non-CDMI) Content-Type
    Result2 = do_request(Workers, SpacePrefix ++ "/dir2/", put,
        [?CDMI_VERSION_HEADER, user_1_token_header(Config), {?HDR_CONTENT_TYPE, <<"application/json">>}],
        <<"{}">>),
    ?assertMatch({ok, ?HTTP_201_CREATED, _ResponseHeaders2, _Response2}, Result2).
%% A CDMI-object PUT without the CDMI version header must be rejected
%% with a missing-required-value ("version") error.
create_cdmi_file_without_cdmi_version_header_should_fail(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    RequestHeaders = [user_1_token_header(Config), ?OBJECT_CONTENT_TYPE_HEADER],
    {ok, Code, _ResponseHeaders, Response} = do_request(
        Workers, binary_to_list(SpaceName) ++ "/file1", put, RequestHeaders, <<"{}">>
    ),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_MISSING_REQUIRED_VALUE(<<"version">>)),
    ?assertMatch(ExpRestError, {Code, json_utils:decode(Response)}).
%% A CDMI-container PUT without the CDMI version header must be rejected
%% with a missing-required-value ("version") error.
create_cdmi_dir_without_cdmi_version_header_should_fail(Config) ->
    Workers = ?config(op_worker_nodes, Config),
    [{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
    RequestHeaders = [user_1_token_header(Config), ?CONTAINER_CONTENT_TYPE_HEADER],
    {ok, Code, _ResponseHeaders, Response} = do_request(
        Workers, binary_to_list(SpaceName) ++ "/dir1/", put, RequestHeaders
    ),
    ExpRestError = rest_test_utils:get_rest_error(?ERROR_MISSING_REQUIRED_VALUE(<<"version">>)),
    ?assertMatch(ExpRestError, {Code, json_utils:decode(Response)}).
%%%===================================================================
%%% SetUp and TearDown functions
%%%===================================================================

%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @private Convenience wrapper for do_request/5 with an empty request body.
do_request(Node, RestSubpath, Method, Headers) ->
    do_request(Node, RestSubpath, Method, Headers, []).
%% @private Performs a CDMI request on the given node(s).
%%
%% For a list of nodes and a GET request: the request is executed on every
%% node that supports the space (nodes answering space_not_supported are
%% filtered out), all collected responses are asserted to be equivalent
%% (times metadata stripped before comparison), and the first response is
%% returned. Crashes with badmatch if no node supports the space.
%%
%% For any other method on a list of nodes: nodes are tried in random order
%% and the first result from a node that supports the space is returned.
%%
%% For a single node (atom): a plain passthrough to make_request/5.
do_request([_ | _] = Nodes, RestSubpath, get, Headers, Body) ->
    [FRes | _] = Responses = lists:filtermap(fun(Node) ->
        case make_request(Node, RestSubpath, get, Headers, Body) of
            space_not_supported -> false;
            Result -> {true, Result}
        end
    end, Nodes),
    case FRes of
        {error, _} ->
            % connection-level errors are not cross-checked between nodes
            ok;
        {ok, ?HTTP_206_PARTIAL_CONTENT, #{?HDR_CONTENT_TYPE := <<"multipart/byteranges", _/binary>>}, _} ->
            % multipart bodies contain random boundaries, so only the
            % status code is compared across nodes
            lists:foreach(fun({ok, LCode, _, _}) ->
                ?assertMatch(?HTTP_206_PARTIAL_CONTENT, LCode)
            end, Responses);
        {ok, RCode, _, RResponse} ->
            % all nodes must agree on code and (time-stripped) body
            RResponseJSON = try_to_decode(RResponse),
            lists:foreach(fun({ok, LCode, _, LResponse}) ->
                LResponseJSON = try_to_decode(LResponse),
                ?assertMatch({RCode, RResponseJSON}, {LCode, LResponseJSON})
            end, Responses)
    end,
    FRes;
do_request([_ | _] = Nodes, RestSubpath, Method, Headers, Body) ->
    % mutating requests are performed once, on the first (shuffled) node
    % that supports the space
    lists:foldl(fun
        (Node, space_not_supported) ->
            make_request(Node, RestSubpath, Method, Headers, Body);
        (_Node, Result) ->
            Result
    end, space_not_supported, lists_utils:shuffle(Nodes));
do_request(Node, RestSubpath, Method, Headers, Body) when is_atom(Node) ->
    make_request(Node, RestSubpath, Method, Headers, Body).
%% @private Executes a single CDMI request on one node and normalizes the
%% "space not supported" case: if the node does not support the space the
%% path refers to and responded with the dedicated spaceNotSupportedBy
%% error, the atom space_not_supported is returned instead of the response.
%% Any other result (including other errors from an unsupporting node) is
%% passed through unchanged.
make_request(Node, RestSubpath, Method, Headers, Body) ->
    case cdmi_test_utils:do_request(Node, RestSubpath, Method, Headers, Body) of
        {ok, RespCode, _RespHeaders, RespBody} = Result ->
            case is_space_supported(Node, RestSubpath) of
                true ->
                    Result;
                false ->
                    % an unsupporting node must never report success
                    ?assert(RespCode >= 300),
                    case {RespCode, try_to_decode(RespBody)} of
                        {?HTTP_400_BAD_REQUEST, #{<<"error">> := #{
                            <<"id">> := <<"spaceNotSupportedBy">>
                        }}}->
                            space_not_supported;
                        _ ->
                            Result
                    end
            end;
        {error, _} = Error ->
            Error
    end.
%% @private Checks whether the space referenced by a CDMI path is supported
%% by the given node. Root ("" or "/") and capability paths are always
%% considered supported. For cdmi_objectid paths the space id is derived
%% from the object id (special capability ids are always supported); for
%% name-based paths the first path segment is matched against the names of
%% the provider's supported spaces.
is_space_supported(_Node, "") ->
    true;
is_space_supported(_Node, "/") ->
    true;
is_space_supported(Node, CdmiPath) ->
    {ok, SuppSpaces} = rpc:call(Node, provider_logic, get_spaces, []),
    SpecialObjectIds = [?ROOT_CAPABILITY_ID, ?CONTAINER_CAPABILITY_ID, ?DATAOBJECT_CAPABILITY_ID],
    case binary:split(list_to_binary(CdmiPath), <<"/">>, [global, trim_all]) of
        [<<"cdmi_capabilities">> | _] ->
            true;
        [<<"cdmi_objectid">>, ObjectId | _] ->
            case lists:member(ObjectId, SpecialObjectIds) of
                true ->
                    true;
                false ->
                    {ok, FileGuid} = file_id:objectid_to_guid(ObjectId),
                    SpaceId = file_id:guid_to_space_id(FileGuid),
                    % NOTE(review): "rootDirVirtualSpaceId" appears to be a
                    % sentinel for the virtual root dir - confirm against
                    % file_id implementation
                    SpaceId == <<"rootDirVirtualSpaceId">> orelse lists:member(SpaceId, SuppSpaces)
            end;
        [SpaceName | _] ->
            lists:any(fun(SpaceId) -> get_space_name(Node, SpaceId) == SpaceName end, SuppSpaces)
    end.
get_space_name(Node, SpaceId) ->
{ok, SpaceName} = rpc:call(Node, space_logic, get_name, [<<"0">>, SpaceId]),
SpaceName.
try_to_decode(Body) ->
try
remove_times_metadata(json_utils:decode(Body))
catch _:invalid_json ->
Body
end.
remove_times_metadata(ResponseJSON) ->
Metadata = maps:get(<<"metadata">>, ResponseJSON, undefined),
case Metadata of
undefined -> ResponseJSON;
_ -> Metadata1 = maps:without( [<<"cdmi_ctime">>,
<<"cdmi_atime">>,
<<"cdmi_mtime">>], Metadata),
maps:put(<<"metadata">>, Metadata1, ResponseJSON)
end.
create_test_dir_and_file(Config) ->
[{_SpaceId, SpaceName} | _] = ?config({spaces, <<"user1">>}, Config),
TestDirName = get_random_string(),
TestFileName = get_random_string(),
FullTestDirName = filename:join([binary_to_list(SpaceName), TestDirName]),
FullTestFileName = filename:join(["/", binary_to_list(SpaceName), TestDirName, TestFileName]),
TestFileContent = <<"test_file_content">>,
case object_exists(Config, TestDirName) of
false ->
{ok, _} = mkdir(Config, FullTestDirName),
?assert(object_exists(Config, FullTestDirName)),
{ok, _} = create_file(Config, FullTestFileName),
?assert(object_exists(Config, FullTestFileName)),
{ok, _} = write_to_file(Config, FullTestFileName, TestFileContent, 0),
?assertEqual(TestFileContent, get_file_content(Config, FullTestFileName));
true -> ok
end,
{binary_to_list(SpaceName), TestDirName, FullTestDirName, TestFileName, FullTestFileName, TestFileContent}.
object_exists(Config, Path) ->
[_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
case lfm_proxy:stat(WorkerP1, SessionId,
{path, absolute_binary_path(Path)}) of
{ok, _} ->
true;
{error, ?ENOENT} ->
false
end.
create_file(Config, Path) ->
[_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
lfm_proxy:create(WorkerP1, SessionId, absolute_binary_path(Path)).
open_file(Worker, Config, Path, OpenMode) ->
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(Worker)}}, Config),
lfm_proxy:open(Worker, SessionId, {path, absolute_binary_path(Path)}, OpenMode).
write_to_file(Config, Path, Data, Offset) ->
[_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
{ok, FileHandle} = open_file(WorkerP1, Config, Path, write),
Result = lfm_proxy:write(WorkerP1, FileHandle, Offset, Data),
lfm_proxy:close(WorkerP1, FileHandle),
Result.
get_file_content(Config, Path) ->
[WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
{ok, FileHandle} = open_file(WorkerP2, Config, Path, read),
Result = case lfm_proxy:read(WorkerP2, FileHandle, ?FILE_BEGINNING, ?INFINITY) of
{error, Error} -> {error, Error};
{ok, Content} -> Content
end,
lfm_proxy:close(WorkerP2, FileHandle),
Result.
mkdir(Config, Path) ->
[_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
lfm_proxy:mkdir(WorkerP1, SessionId, absolute_binary_path(Path)).
set_acl(Config, Path, Acl) ->
[_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
lfm_proxy:set_acl(WorkerP1, SessionId, {path, absolute_binary_path(Path)}, Acl).
get_acl(Config, Path) ->
[WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
lfm_proxy:get_acl(WorkerP2, SessionId, {path, absolute_binary_path(Path)}).
add_xattrs(Config, Path, Xattrs) ->
[_WorkerP2, WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP1)}}, Config),
lists:foreach(fun(Xattr) ->
ok = lfm_proxy:set_xattr(WorkerP1, SessionId, {path, absolute_binary_path(Path)}, Xattr)
end, Xattrs).
get_xattrs(Config, Path) ->
[WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
{ok, Xattrs} = lfm_proxy:list_xattr(WorkerP2, SessionId, {path, absolute_binary_path(Path)}, false, true),
lists:filtermap(
fun
(<<"cdmi_", _/binary>>) ->
false;
(XattrName) ->
{ok, Xattr} = lfm_proxy:get_xattr(WorkerP2, SessionId, {path, absolute_binary_path(Path)}, XattrName),
{true, Xattr}
end, Xattrs).
set_json_metadata(Config, Path, JsonTerm) ->
[WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
{ok, FileGuid} = lfm_proxy:resolve_guid(WorkerP2, SessionId, absolute_binary_path(Path)),
ok = opt_file_metadata:set_custom_metadata(WorkerP2, SessionId, ?FILE_REF(FileGuid), json, JsonTerm, []).
get_json_metadata(Config, Path) ->
[WorkerP2, _WorkerP1] = ?config(op_worker_nodes, Config),
SessionId = ?config({session_id, {<<"user1">>, ?GET_DOMAIN(WorkerP2)}}, Config),
{ok, FileGuid} = lfm_proxy:resolve_guid(WorkerP2, SessionId, absolute_binary_path(Path)),
opt_file_metadata:get_custom_metadata(WorkerP2, SessionId, ?FILE_REF(FileGuid), json, [], false).
absolute_binary_path(Path) ->
list_to_binary(ensure_begins_with_slash(Path)).
ensure_begins_with_slash(Path) ->
ReversedBinary = list_to_binary(lists:reverse(Path)),
lists:reverse(binary_to_list(filepath_utils:ensure_ends_with_slash(ReversedBinary))).
mock_opening_file_without_perms(Config) ->
Workers = ?config(op_worker_nodes, Config),
test_node_starter:load_modules(Workers, [?MODULE]),
test_utils:mock_new(Workers, lfm),
test_utils:mock_expect(
Workers, lfm, monitored_open, fun(_, _, _) -> {error, ?EACCES} end).
unmock_opening_file_without_perms(Config) ->
Workers = ?config(op_worker_nodes, Config),
test_utils:mock_unload(Workers, lfm).
get_random_string() ->
get_random_string(10, "abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ").
get_random_string(Length, AllowedChars) ->
lists:foldl(fun(_, Acc) ->
[lists:nth(rand:uniform(length(AllowedChars)),
AllowedChars)]
++ Acc
end, [], lists:seq(1, Length)).
|
61d6c2ab784bd1367757b7a8409172d849a7678f7ff73c38743ff3ff20473123 | GaloisInc/pate | TestBase.hs | {-# LANGUAGE GADTs #-}
# LANGUAGE FlexibleContexts #
# LANGUAGE GeneralizedNewtypeDeriving #
# LANGUAGE TypeApplications #
# LANGUAGE ScopedTypeVariables #
# LANGUAGE LambdaCase #
module TestBase
( runTests
, TestConfig(..)
) where
import System.Directory
import System.FilePath
import System.FilePath.Glob (namesMatching)
import qualified Data.IORef as IOR
import Data.Maybe
import Data.List ( intercalate )
import qualified Lumberjack as LJ
import qualified Test.Tasty as T
import qualified Test.Tasty.HUnit as T
import qualified Test.Tasty.ExpectedFailure as T
import qualified Pate.Arch as PA
import qualified Pate.Binary as PBi
import qualified Pate.Config as PC
import qualified Pate.Equivalence as PEq
import qualified Pate.Event as PE
import qualified Pate.Loader as PL
import qualified Pate.Loader.ELF as PLE
import qualified Pate.Equivalence.Error as PEE
import qualified Pate.PatchPair as PPa
data TestConfig where
TestConfig ::
{ testArchName :: String
, testArchLoader :: PA.ArchLoader PEE.LoadError
, testExpectEquivalenceFailure :: [String]
-- ^ tests which are failing now but eventually should succeed
, testExpectSelfEquivalenceFailure :: [String]
-- ^ tests which fail to prove self-equivalence
, testOutputAddress :: PC.Address
} -> TestConfig
runTests :: TestConfig -> IO ()
runTests cfg = do
let
name = testArchName cfg
glob = "tests" </> name </> "*.original.exe"
globUnequal = "tests" </> name </> "unequal" </> "*.original.exe"
globCondequal = "tests" </> name </> "conditional" </> "*.original.exe"
equivTestFiles <- mapMaybe (stripExtension "original.exe") <$> namesMatching glob
inequivTestFiles <- mapMaybe (stripExtension "original.exe") <$> namesMatching globUnequal
condequivTestFiles <- mapMaybe (stripExtension "original.exe") <$> namesMatching globCondequal
T.defaultMain $ T.testGroup name $
[ T.testGroup "equivalence" $ map (mkTest cfg) equivTestFiles
, T.testGroup "inequivalence" $ map (\fp -> T.testGroup fp $ [mkEquivTest cfg ShouldNotVerify fp]) inequivTestFiles
, T.testGroup "conditional equivalence" $ map (\fp -> T.testGroup fp $ [mkEquivTest cfg ShouldConditionallyVerify fp]) condequivTestFiles
]
expectSelfEquivalenceFailure :: TestConfig -> FilePath -> Bool
expectSelfEquivalenceFailure cfg fp = baseName `elem` (testExpectSelfEquivalenceFailure cfg)
where
(_, baseName) = splitFileName fp
expectEquivalenceFailure :: TestConfig -> ShouldVerify -> FilePath -> Bool
expectEquivalenceFailure cfg sv fp =
baseName `elem` (testExpectEquivalenceFailure cfg)
where
(_, baseName') = splitFileName fp
baseName = case sv of
ShouldVerify -> baseName'
ShouldNotVerify -> "unequal/" ++ baseName'
ShouldConditionallyVerify -> "conditional/" ++ baseName'
mkTest :: TestConfig -> FilePath -> T.TestTree
mkTest cfg fp =
T.testGroup fp $
[ wrap $ T.testCase "original-self" $ doTest (Just PBi.OriginalRepr) cfg ShouldVerify fp
, wrap $ T.testCase "patched-self" $ doTest (Just PBi.PatchedRepr) cfg ShouldVerify fp
, mkEquivTest cfg ShouldVerify fp
]
where
wrap :: T.TestTree -> T.TestTree
wrap t = if (expectSelfEquivalenceFailure cfg fp) then T.expectFail t else t
data ShouldVerify = ShouldVerify | ShouldNotVerify | ShouldConditionallyVerify
mkEquivTest :: TestConfig -> ShouldVerify -> FilePath -> T.TestTree
mkEquivTest cfg sv fp =
wrap $ T.testCase "equivalence" $ doTest Nothing cfg sv fp
where
wrap :: T.TestTree -> T.TestTree
wrap t = if (expectEquivalenceFailure cfg sv fp) then T.expectFail t else t
-- We assume that all of the tests have be compiled with a linker script that
-- defines this section *after* the default data section as the output memory section
defaultOutputRegion :: TestConfig -> PC.MemRegion
defaultOutputRegion cfg = PC.MemRegion
{ PC.memRegionStart = testOutputAddress cfg
NB : in general we could read the actual section from the ELF , but we
-- assume the linker script has placed only read-only memory after this
-- section
-- see:
, PC.memRegionLength = 4000
}
defaultPatchData :: TestConfig -> PC.PatchData
defaultPatchData cfg =
mempty { PC.observableMemory = [defaultOutputRegion cfg] }
doTest ::
forall bin.
Maybe (PBi.WhichBinaryRepr bin) ->
TestConfig ->
ShouldVerify ->
FilePath ->
IO ()
doTest mwb cfg sv fp = do
infoCfgExists <- doesFileExist (fp <.> "toml")
(logsRef :: IOR.IORef [String]) <- IOR.newIORef []
let
addLogMsg :: String -> IO ()
addLogMsg msg = IOR.atomicModifyIORef' logsRef $ \logs -> (msg : logs, ())
failTest :: String -> IO ()
failTest msg = do
logs <- IOR.readIORef logsRef
T.assertFailure (msg ++ "\n" ++ (intercalate "\n" (reverse logs)))
infoPath = if infoCfgExists then Just $ fp <.> "toml" else Nothing
rcfg = PL.RunConfig
{ PL.patchInfoPath = infoPath
, PL.patchData = defaultPatchData cfg
, PL.origPaths = PLE.simplePaths (fp <.> "original" <.> "exe")
, PL.patchedPaths = PLE.simplePaths (fp <.> "patched" <.> "exe")
, PL.verificationCfg = PC.defaultVerificationCfg { PC.cfgFailureMode = PC.ThrowOnAnyFailure, PC.cfgAddOrphanEdges = False, PC.cfgCheckSimplifier = True, PC.cfgIgnoreUnnamedFunctions = False, PC.cfgIgnoreDivergedControlFlow = False}
, PL.logger = \(PA.SomeValidArch{}) -> do
let
act = LJ.LogAction $ \e -> case e of
PE.Warning err -> do
addLogMsg $ "WARNING: " ++ show err
PE.ErrorRaised err -> putStrLn $ "Error: " ++ show err
PE.ProofTraceEvent _ addrPair msg _ -> do
let addr = case addrPair of
PPa.PatchPairC oAddr pAddr | oAddr == pAddr -> "(" ++ show oAddr ++ "," ++ show pAddr ++ ")"
_ -> show (PPa.some addrPair)
addLogMsg $ addr ++ ":" ++ show msg
PE.StrongestPostDesync pPair _ ->
addLogMsg $ "Desync at: " ++ show pPair
PE.StrongestPostObservable pPair _ ->
addLogMsg $ "Observable counterexample at: " ++ show pPair
PE.StrongestPostOverallResult status _ ->
addLogMsg $ "Overall Result:" ++ show status
_ -> return ()
return $ PL.Logger act []
, PL.archLoader = testArchLoader cfg
, PL.useDwarfHints = False
}
result <- case mwb of
Just wb -> PL.runSelfEquivConfig rcfg wb
Nothing -> PL.runEquivConfig rcfg
case result of
PEq.Errored err -> failTest (show err)
PEq.Equivalent -> case sv of
ShouldVerify -> return ()
_ -> failTest "Unexpectedly proved equivalence."
PEq.Inequivalent -> case sv of
ShouldVerify -> failTest "Failed to prove equivalence."
ShouldNotVerify -> return ()
ShouldConditionallyVerify -> failTest "Failed to prove conditional equivalence."
PEq.ConditionallyEquivalent -> case sv of
ShouldVerify -> failTest "Failed to prove equivalence."
ShouldNotVerify -> failTest "Unexpectedly proved conditional equivalence."
ShouldConditionallyVerify -> return ()
| null | https://raw.githubusercontent.com/GaloisInc/pate/e096e9bd5d302267e19b27d940aacf6d50e1347b/tests/TestBase.hs | haskell | # LANGUAGE GADTs #
^ tests which are failing now but eventually should succeed
^ tests which fail to prove self-equivalence
We assume that all of the tests have be compiled with a linker script that
defines this section *after* the default data section as the output memory section
assume the linker script has placed only read-only memory after this
section
see: | # LANGUAGE FlexibleContexts #
# LANGUAGE GeneralizedNewtypeDeriving #
# LANGUAGE TypeApplications #
# LANGUAGE ScopedTypeVariables #
# LANGUAGE LambdaCase #
module TestBase
( runTests
, TestConfig(..)
) where
import System.Directory
import System.FilePath
import System.FilePath.Glob (namesMatching)
import qualified Data.IORef as IOR
import Data.Maybe
import Data.List ( intercalate )
import qualified Lumberjack as LJ
import qualified Test.Tasty as T
import qualified Test.Tasty.HUnit as T
import qualified Test.Tasty.ExpectedFailure as T
import qualified Pate.Arch as PA
import qualified Pate.Binary as PBi
import qualified Pate.Config as PC
import qualified Pate.Equivalence as PEq
import qualified Pate.Event as PE
import qualified Pate.Loader as PL
import qualified Pate.Loader.ELF as PLE
import qualified Pate.Equivalence.Error as PEE
import qualified Pate.PatchPair as PPa
data TestConfig where
TestConfig ::
{ testArchName :: String
, testArchLoader :: PA.ArchLoader PEE.LoadError
, testExpectEquivalenceFailure :: [String]
, testExpectSelfEquivalenceFailure :: [String]
, testOutputAddress :: PC.Address
} -> TestConfig
runTests :: TestConfig -> IO ()
runTests cfg = do
let
name = testArchName cfg
glob = "tests" </> name </> "*.original.exe"
globUnequal = "tests" </> name </> "unequal" </> "*.original.exe"
globCondequal = "tests" </> name </> "conditional" </> "*.original.exe"
equivTestFiles <- mapMaybe (stripExtension "original.exe") <$> namesMatching glob
inequivTestFiles <- mapMaybe (stripExtension "original.exe") <$> namesMatching globUnequal
condequivTestFiles <- mapMaybe (stripExtension "original.exe") <$> namesMatching globCondequal
T.defaultMain $ T.testGroup name $
[ T.testGroup "equivalence" $ map (mkTest cfg) equivTestFiles
, T.testGroup "inequivalence" $ map (\fp -> T.testGroup fp $ [mkEquivTest cfg ShouldNotVerify fp]) inequivTestFiles
, T.testGroup "conditional equivalence" $ map (\fp -> T.testGroup fp $ [mkEquivTest cfg ShouldConditionallyVerify fp]) condequivTestFiles
]
expectSelfEquivalenceFailure :: TestConfig -> FilePath -> Bool
expectSelfEquivalenceFailure cfg fp = baseName `elem` (testExpectSelfEquivalenceFailure cfg)
where
(_, baseName) = splitFileName fp
expectEquivalenceFailure :: TestConfig -> ShouldVerify -> FilePath -> Bool
expectEquivalenceFailure cfg sv fp =
baseName `elem` (testExpectEquivalenceFailure cfg)
where
(_, baseName') = splitFileName fp
baseName = case sv of
ShouldVerify -> baseName'
ShouldNotVerify -> "unequal/" ++ baseName'
ShouldConditionallyVerify -> "conditional/" ++ baseName'
mkTest :: TestConfig -> FilePath -> T.TestTree
mkTest cfg fp =
T.testGroup fp $
[ wrap $ T.testCase "original-self" $ doTest (Just PBi.OriginalRepr) cfg ShouldVerify fp
, wrap $ T.testCase "patched-self" $ doTest (Just PBi.PatchedRepr) cfg ShouldVerify fp
, mkEquivTest cfg ShouldVerify fp
]
where
wrap :: T.TestTree -> T.TestTree
wrap t = if (expectSelfEquivalenceFailure cfg fp) then T.expectFail t else t
data ShouldVerify = ShouldVerify | ShouldNotVerify | ShouldConditionallyVerify
mkEquivTest :: TestConfig -> ShouldVerify -> FilePath -> T.TestTree
mkEquivTest cfg sv fp =
wrap $ T.testCase "equivalence" $ doTest Nothing cfg sv fp
where
wrap :: T.TestTree -> T.TestTree
wrap t = if (expectEquivalenceFailure cfg sv fp) then T.expectFail t else t
defaultOutputRegion :: TestConfig -> PC.MemRegion
defaultOutputRegion cfg = PC.MemRegion
{ PC.memRegionStart = testOutputAddress cfg
NB : in general we could read the actual section from the ELF , but we
, PC.memRegionLength = 4000
}
defaultPatchData :: TestConfig -> PC.PatchData
defaultPatchData cfg =
mempty { PC.observableMemory = [defaultOutputRegion cfg] }
doTest ::
forall bin.
Maybe (PBi.WhichBinaryRepr bin) ->
TestConfig ->
ShouldVerify ->
FilePath ->
IO ()
doTest mwb cfg sv fp = do
infoCfgExists <- doesFileExist (fp <.> "toml")
(logsRef :: IOR.IORef [String]) <- IOR.newIORef []
let
addLogMsg :: String -> IO ()
addLogMsg msg = IOR.atomicModifyIORef' logsRef $ \logs -> (msg : logs, ())
failTest :: String -> IO ()
failTest msg = do
logs <- IOR.readIORef logsRef
T.assertFailure (msg ++ "\n" ++ (intercalate "\n" (reverse logs)))
infoPath = if infoCfgExists then Just $ fp <.> "toml" else Nothing
rcfg = PL.RunConfig
{ PL.patchInfoPath = infoPath
, PL.patchData = defaultPatchData cfg
, PL.origPaths = PLE.simplePaths (fp <.> "original" <.> "exe")
, PL.patchedPaths = PLE.simplePaths (fp <.> "patched" <.> "exe")
, PL.verificationCfg = PC.defaultVerificationCfg { PC.cfgFailureMode = PC.ThrowOnAnyFailure, PC.cfgAddOrphanEdges = False, PC.cfgCheckSimplifier = True, PC.cfgIgnoreUnnamedFunctions = False, PC.cfgIgnoreDivergedControlFlow = False}
, PL.logger = \(PA.SomeValidArch{}) -> do
let
act = LJ.LogAction $ \e -> case e of
PE.Warning err -> do
addLogMsg $ "WARNING: " ++ show err
PE.ErrorRaised err -> putStrLn $ "Error: " ++ show err
PE.ProofTraceEvent _ addrPair msg _ -> do
let addr = case addrPair of
PPa.PatchPairC oAddr pAddr | oAddr == pAddr -> "(" ++ show oAddr ++ "," ++ show pAddr ++ ")"
_ -> show (PPa.some addrPair)
addLogMsg $ addr ++ ":" ++ show msg
PE.StrongestPostDesync pPair _ ->
addLogMsg $ "Desync at: " ++ show pPair
PE.StrongestPostObservable pPair _ ->
addLogMsg $ "Observable counterexample at: " ++ show pPair
PE.StrongestPostOverallResult status _ ->
addLogMsg $ "Overall Result:" ++ show status
_ -> return ()
return $ PL.Logger act []
, PL.archLoader = testArchLoader cfg
, PL.useDwarfHints = False
}
result <- case mwb of
Just wb -> PL.runSelfEquivConfig rcfg wb
Nothing -> PL.runEquivConfig rcfg
case result of
PEq.Errored err -> failTest (show err)
PEq.Equivalent -> case sv of
ShouldVerify -> return ()
_ -> failTest "Unexpectedly proved equivalence."
PEq.Inequivalent -> case sv of
ShouldVerify -> failTest "Failed to prove equivalence."
ShouldNotVerify -> return ()
ShouldConditionallyVerify -> failTest "Failed to prove conditional equivalence."
PEq.ConditionallyEquivalent -> case sv of
ShouldVerify -> failTest "Failed to prove equivalence."
ShouldNotVerify -> failTest "Unexpectedly proved conditional equivalence."
ShouldConditionallyVerify -> return ()
|
b422523abeb96ed8515c4d6a005da5c64a5836887534cd68965500f2775ca351 | mirage/mirage-skeleton | config.ml | open Mirage
let main =
main
~packages:[ package "duration"; package "randomconv" ]
"Unikernel.Timeout1"
(console @-> time @-> random @-> job)
let () =
register "timeout1" [ main $ default_console $ default_time $ default_random ]
| null | https://raw.githubusercontent.com/mirage/mirage-skeleton/144d68992a284730c383eb3d39c409a061bc452e/tutorial/lwt/timeout1/config.ml | ocaml | open Mirage
let main =
main
~packages:[ package "duration"; package "randomconv" ]
"Unikernel.Timeout1"
(console @-> time @-> random @-> job)
let () =
register "timeout1" [ main $ default_console $ default_time $ default_random ]
| |
83136e2274c7c09d9d64d4d1ce79199aa9029f7089230fddae4225ea1bc8f66f | ghcjs/ghcjs | t10598_run.hs | {-# LANGUAGE DeriveAnyClass #-}
# LANGUAGE DerivingStrategies #
# LANGUAGE GeneralizedNewtypeDeriving #
# LANGUAGE StandaloneDeriving #
module Main where
import Data.Proxy
class C a where
c :: proxy a -> Int
c _ = 42
instance C Int where
c _ = 27
newtype Foo = MkFoo Int
deriving Eq
deriving anyclass C
deriving newtype instance Show Foo
main :: IO ()
main = do
print $ MkFoo 100
print $ c (Proxy :: Proxy Foo)
| null | https://raw.githubusercontent.com/ghcjs/ghcjs/e4cd4232a31f6371c761acd93853702f4c7ca74c/test/ghc/deriving/t10598_run.hs | haskell | # LANGUAGE DeriveAnyClass # | # LANGUAGE DerivingStrategies #
# LANGUAGE GeneralizedNewtypeDeriving #
# LANGUAGE StandaloneDeriving #
module Main where
import Data.Proxy
class C a where
c :: proxy a -> Int
c _ = 42
instance C Int where
c _ = 27
newtype Foo = MkFoo Int
deriving Eq
deriving anyclass C
deriving newtype instance Show Foo
main :: IO ()
main = do
print $ MkFoo 100
print $ c (Proxy :: Proxy Foo)
|
5334a6748d08e873858d25077e67a682d9e311c2362dc20c8307a6109e42b775 | kmmelcher/plp-sad | TxtFunctions.hs | Módulo com todas as funções necessárias para manusear os arquivos da database . Com essas funções , consegue - se
adicionar\remover\atualizar linhas em arquivos , além de também fazer leituras .
module Util.TxtFunctions where
import System.IO
import Control.Exception (evaluate)
import Prelude as P
import qualified Data.List as T
Esta função retorna um array de string com todo o do arquivo
-- como separador.
> :
do arquivo no diretório database
fileToStringArray :: String -> IO [String]
fileToStringArray nomeArquivo = do
arquivo <- openFile ("database/" ++ nomeArquivo ++ ".txt") ReadMode
conteudo <- hGetContents arquivo
evaluate (P.length conteudo)
let conteudoEmLista = P.lines conteudo
return conteudoEmLista
Esta função o formato em string de um i d.
Caso não seja encontrado o objeto , uma string vazia é retornada .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
idObjeto = O i d do objeto que
getObjetoById :: String -> Int -> IO String
getObjetoById nomeArquivo objetoId = buscaObjetoByAtributo nomeArquivo "id" (show objetoId ++ ",")
Esta função o formato em string de um objeto .
Caso não seja encontrado o objeto , uma string vazia é retornada .
-- > Forma de uso:
- Você deve usar delimitadores no valor do . for uma String ela deve
estar envolta em aspas , se for um inteiro deve uma
seu final .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
atributo = O atributo do objeto que deseja ser buscado
valorAtributo = O valor do do objeto que deseja ser buscado
buscaObjetoByAtributo :: String -> String -> String -> IO String
buscaObjetoByAtributo nomeArquivo atributo valorAtributo = do
conteudoEmLista <- fileToStringArray nomeArquivo
buscaObjetoByAtributoRecursivo conteudoEmLista atributo valorAtributo
Esta função trabalha em conjunto com buscaObjetoByAtributo de forma recursiva , o
objeto numa lista as linhas do arquivo .
Caso não seja encontrado o objeto , uma string vazia é retornada .
> :
( objetoAtual : objetosRestantes ) = é o array de string que representa o objeto , tendo como
o objetoAtual e tendo como os próximos objetos o array ObjetosRestantes .
atributo = O atributo do objeto que deseja ser buscado
valorAtributo = O valor do do objeto que deseja ser buscado
buscaObjetoByAtributoRecursivo :: [String] -> String -> String -> IO String
buscaObjetoByAtributoRecursivo [] _ _ = return ""
buscaObjetoByAtributoRecursivo (objetoAtual:objetosRestantes) atributo valorAtributo =
if (atributo ++ " = " ++ valorAtributo) `T.isInfixOf` objetoAtual
then return objetoAtual
else buscaObjetoByAtributoRecursivo objetosRestantes atributo valorAtributo
Checa se um objeto existe na database , retornando True , caso exista , e False , caso contrário .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
idObjeto = O i d do objeto que
checaExistenciaById :: String -> Int -> IO Bool
checaExistenciaById nomeArquivo idObjeto = do
existeObjeto <- getObjetoById nomeArquivo idObjeto
return (existeObjeto /= "")
Checa se um objeto existe na database , retornando True , caso exista , e False , caso contrário .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
atributo = O atributo do objeto que deseja ser buscado
valorAtributo = O valor do do objeto que deseja ser buscado
checaExistenciaByAtributo :: String -> String -> String -> IO Bool
checaExistenciaByAtributo nomeArquivo atributo valorAtributo = do
existeObjeto <- buscaObjetoByAtributo nomeArquivo atributo valorAtributo
return (existeObjeto /= "")
Esta função adiciona uma linha no arquivo . Caso o arquivo já possua uma linha ,
a funcao irá adicionar na
> :
conteudo = Precisa ser no formato string de um objeto
nomeArquivo = o nome do arquivo no diretório database
adicionaLinha :: String -> String -> IO()
adicionaLinha nomeArquivo conteudo = do
let mensagem = conteudo ++ "\n"
appendFile ("database/" ++ nomeArquivo ++ ".txt") mensagem
Esta função busca um valor de i d atualizado para uma nova linha da database , sendo esse valor o i d da ultima linha somado de 1 .
Caso não haja nenhuma linha presente , o valor " 1 " .
> :
nomeArquivo : o nome do arquivo a ser buscado um novo i d no diretório database
buscaNovoId :: String -> IO String
buscaNovoId nomeArquivo = do
conteudoEmLista <- fileToStringArray nomeArquivo
if null conteudoEmLista
then return "1"
else do
let ultimaLinhaEmLista = P.words (last conteudoEmLista)
let ultimoId = read (P.take (P.length (ultimaLinhaEmLista!!3)-1) (ultimaLinhaEmLista!!3)) :: Int
return (show (ultimoId+1))
atualiza uma linha do arquivo com base no seu i d
> :
nomeArquivo = o nome do arquivo no diretório database
-- id = id da linha
novaLinha linha a ser atualizada
atualizaLinhaById :: String -> String -> String -> IO()
atualizaLinhaById nomeArquivo id novaLinha = do
let path = "database/" ++ nomeArquivo ++ ".txt"
conteudoArquivo <- fileToStringArray nomeArquivo
arquivo <- openFile path WriteMode
hPutStr arquivo ""
atualizaLista conteudoArquivo id novaLinha arquivo
hFlush arquivo
hClose arquivo
Esta funcao atualiza uma linha que contem um i d declarado . remover a linha , ou atualizar uma linha do arquivo .
> :
-- (linhaAtual:linhasRestantes) = conteúdo do arquivo a ser atualizado [sem header]
-- id = id da linha
novaLinha linha a ser atualizada [ " " se for para uma remoção ]
atualizaLista :: [String] -> String -> String -> Handle -> IO ()
atualizaLista [] _ _ _= return ()
atualizaLista (linhaAtual:linhasRestantes) id novaLinha arquivo = do
if ("id = " ++ id ++ ",") `T.isInfixOf` linhaAtual
then do
if novaLinha == ""
then do
atualizaLista linhasRestantes id novaLinha arquivo
else do
hPutStrLn arquivo novaLinha
atualizaLista linhasRestantes id novaLinha arquivo
else do
hPutStrLn arquivo linhaAtual
atualizaLista linhasRestantes id novaLinha arquivo
Remove uma linha com base no seu i d.
> :
nomeArquivo = o nome do arquivo no diretório database
-- id = id da linha
removeLinha :: String -> String -> IO ()
removeLinha nomeArquivo id = do
let path = "database/" ++ nomeArquivo ++ ".txt"
conteudoArquivo <- fileToStringArray nomeArquivo
arquivo <- openFile path WriteMode
hPutStr arquivo ""
atualizaLista conteudoArquivo id "" arquivo
hFlush arquivo
hClose arquivo
-- Adiciona aspas a um String.
Útil na database .
> :
-- texto = texto a ser envolto em aspas
adicionaAspas :: String -> String
adicionaAspas texto = "\"" ++ texto ++ "\"" | null | https://raw.githubusercontent.com/kmmelcher/plp-sad/2b2f09ea9b37df1a1d748d7a4aab45b7ac6fd637/haskell/src/Util/TxtFunctions.hs | haskell | como separador.
> Forma de uso:
id = id da linha
(linhaAtual:linhasRestantes) = conteúdo do arquivo a ser atualizado [sem header]
id = id da linha
id = id da linha
Adiciona aspas a um String.
texto = texto a ser envolto em aspas | Módulo com todas as funções necessárias para manusear os arquivos da database . Com essas funções , consegue - se
adicionar\remover\atualizar linhas em arquivos , além de também fazer leituras .
module Util.TxtFunctions where
import System.IO
import Control.Exception (evaluate)
import Prelude as P
import qualified Data.List as T
Esta função retorna um array de string com todo o do arquivo
> :
do arquivo no diretório database
fileToStringArray :: String -> IO [String]
fileToStringArray nomeArquivo = do
arquivo <- openFile ("database/" ++ nomeArquivo ++ ".txt") ReadMode
conteudo <- hGetContents arquivo
evaluate (P.length conteudo)
let conteudoEmLista = P.lines conteudo
return conteudoEmLista
Esta função o formato em string de um i d.
Caso não seja encontrado o objeto , uma string vazia é retornada .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
idObjeto = O i d do objeto que
getObjetoById :: String -> Int -> IO String
getObjetoById nomeArquivo objetoId = buscaObjetoByAtributo nomeArquivo "id" (show objetoId ++ ",")
Esta função o formato em string de um objeto .
Caso não seja encontrado o objeto , uma string vazia é retornada .
- Você deve usar delimitadores no valor do . for uma String ela deve
estar envolta em aspas , se for um inteiro deve uma
seu final .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
atributo = O atributo do objeto que deseja ser buscado
valorAtributo = O valor do do objeto que deseja ser buscado
buscaObjetoByAtributo :: String -> String -> String -> IO String
buscaObjetoByAtributo nomeArquivo atributo valorAtributo = do
conteudoEmLista <- fileToStringArray nomeArquivo
buscaObjetoByAtributoRecursivo conteudoEmLista atributo valorAtributo
Esta função trabalha em conjunto com buscaObjetoByAtributo de forma recursiva , o
objeto numa lista as linhas do arquivo .
Caso não seja encontrado o objeto , uma string vazia é retornada .
> :
( objetoAtual : objetosRestantes ) = é o array de string que representa o objeto , tendo como
o objetoAtual e tendo como os próximos objetos o array ObjetosRestantes .
atributo = O atributo do objeto que deseja ser buscado
valorAtributo = O valor do do objeto que deseja ser buscado
buscaObjetoByAtributoRecursivo :: [String] -> String -> String -> IO String
buscaObjetoByAtributoRecursivo [] _ _ = return ""
buscaObjetoByAtributoRecursivo (objetoAtual:objetosRestantes) atributo valorAtributo =
if (atributo ++ " = " ++ valorAtributo) `T.isInfixOf` objetoAtual
then return objetoAtual
else buscaObjetoByAtributoRecursivo objetosRestantes atributo valorAtributo
Checa se um objeto existe na database , retornando True , caso exista , e False , caso contrário .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
idObjeto = O i d do objeto que
checaExistenciaById :: String -> Int -> IO Bool
checaExistenciaById nomeArquivo idObjeto = do
existeObjeto <- getObjetoById nomeArquivo idObjeto
return (existeObjeto /= "")
Checa se um objeto existe na database , retornando True , caso exista , e False , caso contrário .
> :
nomeArquivo = o nome do arquivo no diretório database no qual o objeto se encontra
atributo = O atributo do objeto que deseja ser buscado
valorAtributo = O valor do do objeto que deseja ser buscado
checaExistenciaByAtributo :: String -> String -> String -> IO Bool
checaExistenciaByAtributo nomeArquivo atributo valorAtributo = do
existeObjeto <- buscaObjetoByAtributo nomeArquivo atributo valorAtributo
return (existeObjeto /= "")
Esta função adiciona uma linha no arquivo . Caso o arquivo já possua uma linha ,
a funcao irá adicionar na
> :
conteudo = Precisa ser no formato string de um objeto
nomeArquivo = o nome do arquivo no diretório database
adicionaLinha :: String -> String -> IO()
adicionaLinha nomeArquivo conteudo = do
let mensagem = conteudo ++ "\n"
appendFile ("database/" ++ nomeArquivo ++ ".txt") mensagem
Esta função busca um valor de i d atualizado para uma nova linha da database , sendo esse valor o i d da ultima linha somado de 1 .
Caso não haja nenhuma linha presente , o valor " 1 " .
> :
nomeArquivo : o nome do arquivo a ser buscado um novo i d no diretório database
buscaNovoId :: String -> IO String
buscaNovoId nomeArquivo = do
conteudoEmLista <- fileToStringArray nomeArquivo
if null conteudoEmLista
then return "1"
else do
let ultimaLinhaEmLista = P.words (last conteudoEmLista)
let ultimoId = read (P.take (P.length (ultimaLinhaEmLista!!3)-1) (ultimaLinhaEmLista!!3)) :: Int
return (show (ultimoId+1))
atualiza uma linha do arquivo com base no seu i d
> :
nomeArquivo = o nome do arquivo no diretório database
novaLinha linha a ser atualizada
atualizaLinhaById :: String -> String -> String -> IO()
atualizaLinhaById nomeArquivo id novaLinha = do
let path = "database/" ++ nomeArquivo ++ ".txt"
conteudoArquivo <- fileToStringArray nomeArquivo
arquivo <- openFile path WriteMode
hPutStr arquivo ""
atualizaLista conteudoArquivo id novaLinha arquivo
hFlush arquivo
hClose arquivo
Esta funcao atualiza uma linha que contem um i d declarado . remover a linha , ou atualizar uma linha do arquivo .
> :
novaLinha linha a ser atualizada [ " " se for para uma remoção ]
atualizaLista :: [String] -> String -> String -> Handle -> IO ()
atualizaLista [] _ _ _= return ()
atualizaLista (linhaAtual:linhasRestantes) id novaLinha arquivo = do
if ("id = " ++ id ++ ",") `T.isInfixOf` linhaAtual
then do
if novaLinha == ""
then do
atualizaLista linhasRestantes id novaLinha arquivo
else do
hPutStrLn arquivo novaLinha
atualizaLista linhasRestantes id novaLinha arquivo
else do
hPutStrLn arquivo linhaAtual
atualizaLista linhasRestantes id novaLinha arquivo
Remove uma linha com base no seu i d.
> :
nomeArquivo = o nome do arquivo no diretório database
removeLinha :: String -> String -> IO ()
removeLinha nomeArquivo id = do
let path = "database/" ++ nomeArquivo ++ ".txt"
conteudoArquivo <- fileToStringArray nomeArquivo
arquivo <- openFile path WriteMode
hPutStr arquivo ""
atualizaLista conteudoArquivo id "" arquivo
hFlush arquivo
hClose arquivo
Útil na database .
> :
adicionaAspas :: String -> String
adicionaAspas texto = "\"" ++ texto ++ "\"" |
53ad690116c6373227de75dd3602d35ebc57bbbf9f230f388a47e18de49a3b23 | tonyg/erlang-rfc4627 | rfc4627.erl | JSON - RFC 4627 - for Erlang
%%---------------------------------------------------------------------------
@author < >
@author LShift Ltd. < >
2007 - 2010 , 2011 , 2012 and 2007 - 2010 LShift Ltd.
@license
%%
%% Permission is hereby granted, free of charge, to any person
%% obtaining a copy of this software and associated documentation
files ( the " Software " ) , to deal in the Software without
%% restriction, including without limitation the rights to use, copy,
%% modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software , and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software .
%%
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
%% MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
%% ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
%% CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
%% SOFTWARE.
%%---------------------------------------------------------------------------
%%
< a href=" / rfc / rfc4627.txt">RFC
%% 4627</a>, the JSON RFC
%%
< a href="/">JSON in general</a >
%%
< a
href=" / pipermail / erlang - questions/2005 - November/017805.html " >
%% message</a> describing the basis of the JSON data type mapping that
%% this module uses
%%
@doc An implementation of RFC 4627 ( JSON , the JavaScript Object Notation ) for Erlang .
%%
%% The basic API is comprised of the {@link encode/1} and {@link decode/1} functions.
%%
%% == Data Type Mapping ==
%%
The data type mapping I 've implemented is as per
%% message [-archive/erlang-questions/200511/msg00193.html] - see {@link json()}.
%%
%% == Unicode ==
%%
%% When serializing a string, if characters are found with codepoint
> 127 , we rely on the unicode encoder to build the proper byte
%% sequence for transmission. We still use the \uXXXX escape for
%% control characters (other than the RFC-specified specially
%% recognised ones).
%%
%% {@link decode/1} will autodetect the unicode encoding used, and any
strings returned in the result as binaries will contain UTF-8
encoded byte sequences for codepoints > 127 . Object keys containing
codepoints > 127 will be returned as lists of codepoints , rather
than being UTF-8 encoded . If you have already transformed the text
%% to parse into a list of unicode codepoints, perhaps by your own use
of { @link unicode_decode/1 } , then use { @link decode_noauto/1 } to
%% avoid redundant and erroneous double-unicode-decoding.
%%
Similarly , { @link encode/1 } produces text that is already UTF-8
%% encoded. To get raw codepoints, use {@link encode_noauto/1} and
{ @link encode_noauto/2 } . You can use { @link unicode_encode/1 } to
UTF - encode the results , if that 's appropriate for your application .
%%
%% == Differences to the specification ==
%%
%% I'm lenient in the following ways during parsing:
%%
%% <ul>
%% <li>repeated commas in arrays and objects collapse to a single comma</li>
%% <li>any character =<32 is considered whitespace</li>
< li > leading zeros for numbers are accepted</li >
%% <li>we don't restrict the toplevel token to only object or array -
%% any JSON value can be used at toplevel</li>
%% </ul>
@type json ( ) = ( ) | jsonarray ( ) | jsonnum ( ) | jsonstr ( ) | true | false | null . An Erlang representation of a general JSON value .
@type jsonobj ( ) = { obj , [ { jsonkey ( ) , ( ) } ] } . A JSON " object " or " struct " .
%% @type jsonkey() = string(). A field-name within a JSON "object".
@type jsonarray ( ) = [ ( ) ] . A JSON array value .
%% @type jsonnum() = integer() | float(). A JSON numeric value.
%% @type jsonstr() = binary(). A JSON string value.
%% @type byte() = integer(). An integer >=0 and =<255.
-module(rfc4627).
-ifdef(use_specs).
-type json() :: jsonobj() | jsonarray() | jsonnum() | jsonstr() | true | false | null.
-type jsonobj() :: {obj, [{jsonkey(), json()}]}.
-type jsonkey() :: string().
-type jsonarray() :: [json()].
-type jsonnum() :: integer() | float().
-type jsonstr() :: binary().
-export_type([json/0,jsonobj/0,jsonkey/0,jsonarray/0,jsonnum/0,jsonstr/0]).
use_specs
-export([mime_type/0, encode/1, decode/1]).
-export([encode_noauto/1, encode_noauto/2, decode_noauto/1]).
-export([unicode_decode/1, unicode_encode/1]).
-export([from_record/3, to_record/3]).
-export([hex_digit/1, digit_hex/1]).
-export([get_field/2, get_field/3, set_field/3, exclude_field/2]).
-export([equiv/2]).
( ) - > string ( )
@doc Returns the IANA - registered MIME type for JSON data .
mime_type() ->
"application/json".
%% @spec (json()) -> [byte()]
%%
@doc Encodes the JSON value supplied , first into Unicode
codepoints , and then into UTF-8 .
%%
%% The resulting string is a list of byte values that should be
interpreted as UTF-8 encoded text .
%%
%% During encoding, atoms and binaries are accepted as keys of JSON
%% objects (type {@link jsonkey()}) as well as the usual strings
%% (lists of character codepoints).
encode(X) ->
unicode_encode({'utf-8', encode_noauto(X)}).
%% @spec (json()) -> string()
%%
@doc Encodes the JSON value supplied into raw Unicode codepoints .
%%
The resulting string may contain codepoints with value > = 128 . You
can use { @link unicode_encode/1 } to UTF - encode the results , if
%% that's appropriate for your application.
%%
%% During encoding, atoms and binaries are accepted as keys of JSON
%% objects (type {@link jsonkey()}) as well as the usual strings
%% (lists of character codepoints).
encode_noauto(X) ->
lists:reverse(encode_noauto(X, [])).
%% @spec (json(), string()) -> string()
%%
@doc As { @link encode_noauto/1 } , but prepends < i > reversed</i > text
%% to the supplied accumulator string.
encode_noauto(true, Acc) ->
"eurt" ++ Acc;
encode_noauto(false, Acc) ->
"eslaf" ++ Acc;
encode_noauto(null, Acc) ->
"llun" ++ Acc;
encode_noauto(Str, Acc) when is_binary(Str) ->
Codepoints = xmerl_ucs:from_utf8(Str),
quote_and_encode_string(Codepoints, Acc);
encode_noauto(Str, Acc) when is_atom(Str) ->
quote_and_encode_string(atom_to_list(Str), Acc);
encode_noauto(Num, Acc) when is_number(Num) ->
encode_number(Num, Acc);
encode_noauto({obj, Fields}, Acc) ->
"}" ++ encode_object(Fields, "{" ++ Acc);
encode_noauto(Dict, Acc) when element(1, Dict) =:= dict ->
"}" ++ encode_object(dict:to_list(Dict), "{" ++ Acc);
encode_noauto(Arr, Acc) when is_list(Arr) ->
"]" ++ encode_array(Arr, "[" ++ Acc).
encode_object([], Acc) ->
Acc;
encode_object([{Key, Value}], Acc) ->
encode_field(Key, Value, Acc);
encode_object([{Key, Value} | Rest], Acc) ->
encode_object(Rest, "," ++ encode_field(Key, Value, Acc)).
encode_field(Key, Value, Acc) when is_binary(Key) ->
Codepoints = xmerl_ucs:from_utf8(Key),
encode_noauto(Value, ":" ++ quote_and_encode_string(Codepoints, Acc));
encode_field(Key, Value, Acc) when is_atom(Key) ->
encode_noauto(Value, ":" ++ quote_and_encode_string(atom_to_list(Key), Acc));
encode_field(Key, Value, Acc) when is_list(Key) ->
encode_noauto(Value, ":" ++ quote_and_encode_string(Key, Acc)).
encode_array([], Acc) ->
Acc;
encode_array([X], Acc) ->
encode_noauto(X, Acc);
encode_array([X | Rest], Acc) ->
encode_array(Rest, "," ++ encode_noauto(X, Acc)).
quote_and_encode_string(Str, Acc) ->
"\"" ++ encode_string(Str, "\"" ++ Acc).
encode_string([], Acc) ->
Acc;
encode_string([$" | Rest], Acc) ->
encode_string(Rest, [$", $\\ | Acc]);
encode_string([$\\ | Rest], Acc) ->
encode_string(Rest, [$\\, $\\ | Acc]);
encode_string([X | Rest], Acc) when X < 32 orelse X > 127 ->
encode_string(Rest, encode_general_char(X, Acc));
encode_string([X | Rest], Acc) ->
encode_string(Rest, [X | Acc]).
encode_general_char(8, Acc) -> [$b, $\\ | Acc];
encode_general_char(9, Acc) -> [$t, $\\ | Acc];
encode_general_char(10, Acc) -> [$n, $\\ | Acc];
encode_general_char(12, Acc) -> [$f, $\\ | Acc];
encode_general_char(13, Acc) -> [$r, $\\ | Acc];
encode_general_char(X, Acc) when X > 127 -> [X | Acc];
encode_general_char(X, Acc) ->
FIXME currently this branch never runs .
%% We could make it configurable, maybe?
Utf16Bytes = xmerl_ucs:to_utf16be(X),
encode_utf16be_chars(Utf16Bytes, Acc).
encode_utf16be_chars([], Acc) ->
Acc;
encode_utf16be_chars([B1, B2 | Rest], Acc) ->
encode_utf16be_chars(Rest, [hex_digit((B2) band 16#F),
hex_digit((B2 bsr 4) band 16#F),
hex_digit((B1) band 16#F),
hex_digit((B1 bsr 4) band 16#F),
$u,
$\\ | Acc]).
%% @spec (Nibble::integer()) -> char()
@doc Returns the character code corresponding to Nibble .
%%
%% Nibble must be >=0 and =<15.
hex_digit(N) when is_integer(N), N >= 0, N =< 9 -> $0 + N;
hex_digit(N) when is_integer(N), N >= 10, N =< 15 -> $A + N - 10.
encode_number(Num, Acc) when is_integer(Num) ->
lists:reverse(integer_to_list(Num), Acc);
encode_number(Num, Acc) when is_float(Num) ->
lists:reverse(float_to_list(Num), Acc).
( Input::(binary ( ) | [ byte ( ) ] ) ) - > ( { ok , ( ) , Remainder } | { error , Reason } )
%% where Remainder = string()
%% Reason = any()
%%
%% @doc Decodes a JSON value from an input binary or string of
Unicode - encoded text .
%%
%% Given a binary, converts it to a list of bytes. Given a
%% list/string, interprets it as a list of bytes.
%%
Uses { @link unicode_decode/1 } on its input , which results in a list
%% of codepoints, and then decodes a JSON value from that list of
%% codepoints.
%%
%% Returns either `{ok, Result, Remainder}', where Remainder is the
%% remaining portion of the input that was not consumed in the process
%% of decoding Result, or `{error, Reason}'.
decode(Bin) when is_binary(Bin) ->
decode(binary_to_list(Bin));
decode(Bytes) ->
{_Charset, Codepoints} = unicode_decode(Bytes),
decode_noauto(Codepoints).
@spec ( Input::string ( ) ) - > ( { ok , ( ) , string ( ) } | { error , any ( ) } )
%%
@doc As { @link decode/1 } , but does not perform Unicode decoding on its input .
%%
Expects a list of codepoints - an ordinary Erlang string - rather
than a list of Unicode - encoded bytes .
decode_noauto(Bin) when is_binary(Bin) ->
decode_noauto(binary_to_list(Bin));
decode_noauto(Chars) ->
case catch parse(skipws(Chars)) of
{'EXIT', Reason} ->
%% Reason is usually far too much information, but helps
%% if needing to debug this module.
{error, Reason};
{Value, Remaining} ->
{ok, Value, skipws(Remaining)}
end.
( [ byte ( ) ] ) - > [ char ( ) ]
%%
@doc Autodetects and decodes using the Unicode encoding of its input .
%%
From RFC4627 , section 3 , " Encoding " :
%%
%% <blockquote>
JSON text SHALL be encoded in Unicode . The default encoding is
UTF-8 .
%%
Since the first two characters of a JSON text will always be ASCII
%% characters [RFC0020], it is possible to determine whether an octet
stream is UTF-8 , UTF-16 ( BE or LE ) , or UTF-32 ( BE or LE ) by looking
at the pattern of nulls in the first four octets .
%%
%% 00 00 00 xx UTF-32BE
00 xx 00
xx 00 00 00 UTF-32LE
xx 00 xx 00 UTF-16LE
%% </blockquote>
%%
%% Interestingly, the BOM (byte-order mark) is not mentioned. We
%% support it here by using it to detect our encoding, discarding it
if present , even though RFC4627 explicitly notes that the first two
%% characters of a JSON text will be ASCII.
%%
%% If a BOM ([]) is present, we use
that ; if not , we use RFC4627 's rules ( as above ) . Note that UTF-32
is the same as UCS-4 for our purposes ( but see also
%% [-9.html]). Note that UTF-16 is
not the same as !
%%
Note that I 'm using xmerl 's UCS / UTF support here . There 's another
UTF-8 codec in asn1rt , which works on binaries instead of lists .
%%
unicode_decode([0,0,254,255|C]) -> {'utf-32', xmerl_ucs:from_ucs4be(C)};
unicode_decode([255,254,0,0|C]) -> {'utf-32', xmerl_ucs:from_ucs4le(C)};
unicode_decode([254,255|C]) -> {'utf-16', xmerl_ucs:from_utf16be(C)};
unicode_decode([239,187,191|C]) -> {'utf-8', xmerl_ucs:from_utf8(C)};
unicode_decode(C=[0,0,_,_|_]) -> {'utf-32be', xmerl_ucs:from_ucs4be(C)};
unicode_decode(C=[_,_,0,0|_]) -> {'utf-32le', xmerl_ucs:from_ucs4le(C)};
unicode_decode(C=[0,_|_]) -> {'utf-16be', xmerl_ucs:from_utf16be(C)};
unicode_decode(C=[_,0|_]) -> {'utf-16le', xmerl_ucs:from_utf16le(C)};
unicode_decode(C=_) -> {'utf-8', xmerl_ucs:from_utf8(C)}.
( EncodingAndCharacters::{Encoding , [ char ( ) ] } ) - > [ byte ( ) ]
%% where Encoding = 'utf-32' | 'utf-32be' | 'utf-32le' | 'utf-16' |
' utf-16be ' | ' utf-16le ' | ' utf-8 '
%%
@doc Encodes the given characters to bytes , using the given Unicode encoding .
%%
%% For convenience, we supply a partial inverse of unicode_decode; If
a BOM is requested , we more - or - less arbitrarily pick the big - endian
%% variant of the encoding, since big-endian is network-order. We
do n't support UTF-8 with BOM here .
unicode_encode({'utf-32', C}) -> [0,0,254,255|xmerl_ucs:to_ucs4be(C)];
unicode_encode({'utf-32be', C}) -> xmerl_ucs:to_ucs4be(C);
unicode_encode({'utf-32le', C}) -> xmerl_ucs:to_ucs4le(C);
unicode_encode({'utf-16', C}) -> [254,255|xmerl_ucs:to_utf16be(C)];
unicode_encode({'utf-16be', C}) -> xmerl_ucs:to_utf16be(C);
unicode_encode({'utf-16le', C}) -> xmerl_ucs:to_utf16le(C);
unicode_encode({'utf-8', C}) -> xmerl_ucs:to_utf8(C).
parse([$" | Rest]) -> %% " emacs balancing
{Codepoints, Rest1} = parse_string(Rest, []),
{list_to_binary(xmerl_ucs:to_utf8(Codepoints)), Rest1};
parse("true" ++ Rest) -> {true, Rest};
parse("false" ++ Rest) -> {false, Rest};
parse("null" ++ Rest) -> {null, Rest};
parse([${ | Rest]) -> parse_object(skipws(Rest), []);
parse([$[ | Rest]) -> parse_array(skipws(Rest), []);
parse([]) -> exit(unexpected_end_of_input);
parse(Chars) -> parse_number(Chars, []).
skipws([X | Rest]) when X =< 32 ->
skipws(Rest);
skipws(Chars) ->
Chars.
parse_string(Chars, Acc) ->
case parse_codepoint(Chars) of
{done, Rest} ->
{lists:reverse(Acc), Rest};
{ok, Codepoint, Rest} ->
parse_string(Rest, [Codepoint | Acc])
end.
parse_codepoint([$" | Rest]) -> %% " emacs balancing
{done, Rest};
parse_codepoint([$\\, Key | Rest]) ->
parse_general_char(Key, Rest);
parse_codepoint([X | Rest]) ->
{ok, X, Rest}.
parse_general_char($b, Rest) -> {ok, 8, Rest};
parse_general_char($t, Rest) -> {ok, 9, Rest};
parse_general_char($n, Rest) -> {ok, 10, Rest};
parse_general_char($f, Rest) -> {ok, 12, Rest};
parse_general_char($r, Rest) -> {ok, 13, Rest};
parse_general_char($/, Rest) -> {ok, $/, Rest};
parse_general_char($\\, Rest) -> {ok, $\\, Rest};
parse_general_char($", Rest) -> {ok, $", Rest};
parse_general_char($u, [D0, D1, D2, D3 | Rest]) ->
Codepoint =
(digit_hex(D0) bsl 12) +
(digit_hex(D1) bsl 8) +
(digit_hex(D2) bsl 4) +
(digit_hex(D3)),
if
Codepoint >= 16#D800 andalso Codepoint < 16#DC00 ->
% High half of surrogate pair
case parse_codepoint(Rest) of
{low_surrogate_pair, Codepoint2, Rest1} ->
[FinalCodepoint] =
xmerl_ucs:from_utf16be(<<Codepoint:16/big-unsigned-integer,
Codepoint2:16/big-unsigned-integer>>),
{ok, FinalCodepoint, Rest1};
_ ->
exit(incorrect_usage_of_surrogate_pair)
end;
Codepoint >= 16#DC00 andalso Codepoint < 16#E000 ->
{low_surrogate_pair, Codepoint, Rest};
true ->
{ok, Codepoint, Rest}
end.
@spec ( Hexchar::char ( ) ) - > integer ( )
@doc Returns the number corresponding to Hexchar .
%%
Hexchar must be one of the characters ` $ 0 ' through ` $ 9 ' , ` $ A '
%% through `$F' or `$a' through `$f'.
digit_hex(C) when is_integer(C), C >= $0, C =< $9 -> C - $0;
digit_hex(C) when is_integer(C), C >= $A, C =< $F -> C - $A + 10;
digit_hex(C) when is_integer(C), C >= $a, C =< $f -> C - $a + 10.
finish_number(Acc, Rest) ->
Str = lists:reverse(Acc),
{case catch list_to_integer(Str) of
{'EXIT', _} -> list_to_float(Str);
Value -> Value
end, Rest}.
parse_number([$- | Rest], Acc) ->
parse_number1(Rest, [$- | Acc]);
parse_number(Rest = [C | _], Acc) ->
case is_digit(C) of
true -> parse_number1(Rest, Acc);
false -> exit(syntax_error)
end.
parse_number1(Rest, Acc) ->
{Acc1, Rest1} = parse_int_part(Rest, Acc),
case Rest1 of
[] -> finish_number(Acc1, []);
[$. | More] ->
{Acc2, Rest2} = parse_int_part(More, [$. | Acc1]),
parse_exp(Rest2, Acc2, false);
_ ->
parse_exp(Rest1, Acc1, true)
end.
parse_int_part(Chars = [_Ch | _Rest], Acc) ->
parse_int_part0(Chars, Acc).
parse_int_part0([], Acc) ->
{Acc, []};
parse_int_part0([Ch | Rest], Acc) ->
case is_digit(Ch) of
true -> parse_int_part0(Rest, [Ch | Acc]);
false -> {Acc, [Ch | Rest]}
end.
parse_exp([$e | Rest], Acc, NeedFrac) ->
parse_exp1(Rest, Acc, NeedFrac);
parse_exp([$E | Rest], Acc, NeedFrac) ->
parse_exp1(Rest, Acc, NeedFrac);
parse_exp(Rest, Acc, _NeedFrac) ->
finish_number(Acc, Rest).
parse_exp1(Rest, Acc, NeedFrac) ->
{Acc1, Rest1} = parse_signed_int_part(Rest, if
NeedFrac -> [$e, $0, $. | Acc];
true -> [$e | Acc]
end),
finish_number(Acc1, Rest1).
parse_signed_int_part([$+ | Rest], Acc) ->
parse_int_part(Rest, [$+ | Acc]);
parse_signed_int_part([$- | Rest], Acc) ->
parse_int_part(Rest, [$- | Acc]);
parse_signed_int_part(Rest, Acc) ->
parse_int_part(Rest, Acc).
is_digit(N) when is_integer(N) -> N >= $0 andalso N =< $9;
is_digit(_) -> false.
parse_object([$} | Rest], Acc) ->
{{obj, lists:reverse(Acc)}, Rest};
parse_object([$, | Rest], Acc) ->
parse_object(skipws(Rest), Acc);
parse_object([$" | Rest], Acc) -> %% " emacs balancing
{KeyCodepoints, Rest1} = parse_string(Rest, []),
[$: | Rest2] = skipws(Rest1),
{Value, Rest3} = parse(skipws(Rest2)),
parse_object(skipws(Rest3), [{KeyCodepoints, Value} | Acc]).
parse_array([$] | Rest], Acc) ->
{lists:reverse(Acc), Rest};
parse_array([$, | Rest], Acc) ->
parse_array(skipws(Rest), Acc);
parse_array(Chars, Acc) ->
{Value, Rest} = parse(Chars),
parse_array(skipws(Rest), [Value | Acc]).
@spec ( Record , atom ( ) , [ any ( ) ] ) - > ( )
%% where Record = tuple()
%%
@doc Used by the ` ? RFC4627_FROM_RECORD ' macro in ` rfc4627.hrl ' .
%%
Given a record type definiton of ` ` -record(myrecord , { field1 ,
field } ) '' , and a value ` ` V = # myrecord { } '' , the code
%% ``?RFC4627_FROM_RECORD(myrecord, V)'' will return a JSON "object"
%% with fields corresponding to the fields of the record. The macro
%% expands to a call to the `from_record' function.
from_record(R, _RecordName, Fields) ->
{obj, encode_record_fields(R, 2, Fields)}.
encode_record_fields(_R, _Index, []) ->
[];
encode_record_fields(R, Index, [Field | Rest]) ->
case element(Index, R) of
undefined ->
encode_record_fields(R, Index + 1, Rest);
Value ->
[{atom_to_list(Field), Value} | encode_record_fields(R, Index + 1, Rest)]
end.
( ( ) , DefaultValue::Record , [ atom ( ) ] ) - > Record
%% where Record = tuple()
%%
%% @doc Used by the `?RFC4627_TO_RECORD' macro in `rfc4627.hrl'.
%%
Given a record type definiton of ` ` -record(myrecord , { field1 ,
field } ) '' , and a JSON " object " ` ` J = { obj , [ { " field1 " , 123 } ,
{ " field2 " , 234 } ] } '' , the code ` ` ? , J ) ''
will return a record ` ` # myrecord{field1 = 123 , field2 = 234 } '' .
The macro expands to a call to the ` to_record ' function .
to_record({obj, Values}, Fallback, Fields) ->
list_to_tuple([element(1, Fallback) | decode_record_fields(Values, Fallback, 2, Fields)]).
decode_record_fields(_Values, _Fallback, _Index, []) ->
[];
decode_record_fields(Values, Fallback, Index, [Field | Rest]) ->
[case lists:keysearch(atom_to_list(Field), 1, Values) of
{value, {_, Value}} ->
Value;
false ->
element(Index, Fallback)
end | decode_record_fields(Values, Fallback, Index + 1, Rest)].
( ( ) , atom ( ) ) - > ( )
%% @doc Exclude a named field from a JSON "object".
exclude_field({obj, Props}, Key) ->
{obj, lists:keydelete(Key, 1, Props)}.
( ( ) , atom ( ) ) - > { ok , ( ) } | not_found
%% @doc Retrieves the value of a named field of a JSON "object".
get_field({obj, Props}, Key) ->
case lists:keysearch(Key, 1, Props) of
{value, {_K, Val}} ->
{ok, Val};
false ->
not_found
end.
@spec ( ( ) , atom ( ) , ) ) - > json ( )
%% @doc Retrieves the value of a named field of a JSON "object", or a
%% default value if no such field is present.
get_field(Obj, Key, DefaultValue) ->
case get_field(Obj, Key) of
{ok, Val} ->
Val;
not_found ->
DefaultValue
end.
( ( ) , atom ( ) , ) ) - > ( )
%% @doc Adds or replaces a named field with the given value.
%%
%% Returns a JSON "object" that contains the new field value as well
as all the unmodified fields from the first argument .
set_field({obj, Props}, Key, NewValue) ->
{obj, [{Key, NewValue} | lists:keydelete(Key, 1, Props)]}.
@spec ( A::json ( ) , ( ) ) - > bool ( )
%% @doc Tests equivalence of JSON terms.
%%
After ` equiv ' predicate in mochijson .
equiv({obj, Props1}, {obj, Props2}) ->
L1 = lists:keysort(1, Props1),
L2 = lists:keysort(1, Props2),
equiv_sorted_plists(L1, L2);
equiv(A, B) when is_list(A) andalso is_list(B) ->
equiv_arrays(A, B);
equiv(A, B) ->
A == B.
equiv_sorted_plists([], []) -> true;
equiv_sorted_plists([], _) -> false;
equiv_sorted_plists(_, []) -> false;
equiv_sorted_plists([{K1, V1} | R1], [{K2, V2} | R2]) ->
K1 == K2 andalso equiv(V1, V2) andalso equiv_sorted_plists(R1, R2).
equiv_arrays([], []) -> true;
equiv_arrays([], _) -> false;
equiv_arrays(_, []) -> false;
equiv_arrays([V1 | R1], [V2 | R2]) ->
equiv(V1, V2) andalso equiv_arrays(R1, R2).
| null | https://raw.githubusercontent.com/tonyg/erlang-rfc4627/1614d95831567ae8b060ce484f7b291048edec2d/src/rfc4627.erl | erlang | ---------------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------------------------------------------------------------------------
4627</a>, the JSON RFC
message</a> describing the basis of the JSON data type mapping that
this module uses
The basic API is comprised of the {@link encode/1} and {@link decode/1} functions.
== Data Type Mapping ==
message [-archive/erlang-questions/200511/msg00193.html] - see {@link json()}.
== Unicode ==
When serializing a string, if characters are found with codepoint
sequence for transmission. We still use the \uXXXX escape for
control characters (other than the RFC-specified specially
recognised ones).
{@link decode/1} will autodetect the unicode encoding used, and any
to parse into a list of unicode codepoints, perhaps by your own use
avoid redundant and erroneous double-unicode-decoding.
encoded. To get raw codepoints, use {@link encode_noauto/1} and
== Differences to the specification ==
I'm lenient in the following ways during parsing:
<ul>
<li>repeated commas in arrays and objects collapse to a single comma</li>
<li>any character =<32 is considered whitespace</li>
<li>we don't restrict the toplevel token to only object or array -
any JSON value can be used at toplevel</li>
</ul>
@type jsonkey() = string(). A field-name within a JSON "object".
@type jsonnum() = integer() | float(). A JSON numeric value.
@type jsonstr() = binary(). A JSON string value.
@type byte() = integer(). An integer >=0 and =<255.
@spec (json()) -> [byte()]
The resulting string is a list of byte values that should be
During encoding, atoms and binaries are accepted as keys of JSON
objects (type {@link jsonkey()}) as well as the usual strings
(lists of character codepoints).
@spec (json()) -> string()
that's appropriate for your application.
During encoding, atoms and binaries are accepted as keys of JSON
objects (type {@link jsonkey()}) as well as the usual strings
(lists of character codepoints).
@spec (json(), string()) -> string()
to the supplied accumulator string.
We could make it configurable, maybe?
@spec (Nibble::integer()) -> char()
Nibble must be >=0 and =<15.
where Remainder = string()
Reason = any()
@doc Decodes a JSON value from an input binary or string of
Given a binary, converts it to a list of bytes. Given a
list/string, interprets it as a list of bytes.
of codepoints, and then decodes a JSON value from that list of
codepoints.
Returns either `{ok, Result, Remainder}', where Remainder is the
remaining portion of the input that was not consumed in the process
of decoding Result, or `{error, Reason}'.
Reason is usually far too much information, but helps
if needing to debug this module.
<blockquote>
characters [RFC0020], it is possible to determine whether an octet
00 00 00 xx UTF-32BE
</blockquote>
Interestingly, the BOM (byte-order mark) is not mentioned. We
support it here by using it to detect our encoding, discarding it
characters of a JSON text will be ASCII.
If a BOM ([]) is present, we use
[-9.html]). Note that UTF-16 is
where Encoding = 'utf-32' | 'utf-32be' | 'utf-32le' | 'utf-16' |
For convenience, we supply a partial inverse of unicode_decode; If
variant of the encoding, since big-endian is network-order. We
High half of surrogate pair
through `$F' or `$a' through `$f'.
where Record = tuple()
``?RFC4627_FROM_RECORD(myrecord, V)'' will return a JSON "object"
with fields corresponding to the fields of the record. The macro
expands to a call to the `from_record' function.
where Record = tuple()
@doc Used by the `?RFC4627_TO_RECORD' macro in `rfc4627.hrl'.
@doc Exclude a named field from a JSON "object".
@doc Retrieves the value of a named field of a JSON "object".
@doc Retrieves the value of a named field of a JSON "object", or a
default value if no such field is present.
@doc Adds or replaces a named field with the given value.
Returns a JSON "object" that contains the new field value as well
@doc Tests equivalence of JSON terms.
| JSON - RFC 4627 - for Erlang
@author < >
@author LShift Ltd. < >
2007 - 2010 , 2011 , 2012 and 2007 - 2010 LShift Ltd.
@license
files ( the " Software " ) , to deal in the Software without
of the Software , and to permit persons to whom the Software is
included in all copies or substantial portions of the Software .
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
< a href=" / rfc / rfc4627.txt">RFC
< a href="/">JSON in general</a >
< a
href=" / pipermail / erlang - questions/2005 - November/017805.html " >
@doc An implementation of RFC 4627 ( JSON , the JavaScript Object Notation ) for Erlang .
The data type mapping I 've implemented is as per
> 127 , we rely on the unicode encoder to build the proper byte
strings returned in the result as binaries will contain UTF-8
encoded byte sequences for codepoints > 127 . Object keys containing
codepoints > 127 will be returned as lists of codepoints , rather
than being UTF-8 encoded . If you have already transformed the text
of { @link unicode_decode/1 } , then use { @link decode_noauto/1 } to
Similarly , { @link encode/1 } produces text that is already UTF-8
{ @link encode_noauto/2 } . You can use { @link unicode_encode/1 } to
UTF - encode the results , if that 's appropriate for your application .
< li > leading zeros for numbers are accepted</li >
@type json ( ) = ( ) | jsonarray ( ) | jsonnum ( ) | jsonstr ( ) | true | false | null . An Erlang representation of a general JSON value .
@type jsonobj ( ) = { obj , [ { jsonkey ( ) , ( ) } ] } . A JSON " object " or " struct " .
@type jsonarray ( ) = [ ( ) ] . A JSON array value .
-module(rfc4627).
-ifdef(use_specs).
-type json() :: jsonobj() | jsonarray() | jsonnum() | jsonstr() | true | false | null.
-type jsonobj() :: {obj, [{jsonkey(), json()}]}.
-type jsonkey() :: string().
-type jsonarray() :: [json()].
-type jsonnum() :: integer() | float().
-type jsonstr() :: binary().
-export_type([json/0,jsonobj/0,jsonkey/0,jsonarray/0,jsonnum/0,jsonstr/0]).
use_specs
-export([mime_type/0, encode/1, decode/1]).
-export([encode_noauto/1, encode_noauto/2, decode_noauto/1]).
-export([unicode_decode/1, unicode_encode/1]).
-export([from_record/3, to_record/3]).
-export([hex_digit/1, digit_hex/1]).
-export([get_field/2, get_field/3, set_field/3, exclude_field/2]).
-export([equiv/2]).
( ) - > string ( )
@doc Returns the IANA - registered MIME type for JSON data .
mime_type() ->
"application/json".
@doc Encodes the JSON value supplied , first into Unicode
codepoints , and then into UTF-8 .
interpreted as UTF-8 encoded text .
encode(X) ->
unicode_encode({'utf-8', encode_noauto(X)}).
@doc Encodes the JSON value supplied into raw Unicode codepoints .
The resulting string may contain codepoints with value > = 128 . You
can use { @link unicode_encode/1 } to UTF - encode the results , if
encode_noauto(X) ->
lists:reverse(encode_noauto(X, [])).
%% @doc As {@link encode_noauto/1}, but prepends <i>reversed</i> text
%% onto the accumulator.
%% Core encoder. Each clause prepends the *reversed* textual encoding of
%% its argument onto the already-reversed accumulator Acc; the caller
%% (encode_noauto/1) reverses the final result once. Hence literals such
%% as "eurt" below ("true" reversed).
encode_noauto(true, Acc) ->
    "eurt" ++ Acc;
encode_noauto(false, Acc) ->
    "eslaf" ++ Acc;
encode_noauto(null, Acc) ->
    "llun" ++ Acc;
%% Binaries are interpreted as UTF-8 encoded strings.
encode_noauto(Str, Acc) when is_binary(Str) ->
    Codepoints = xmerl_ucs:from_utf8(Str),
    quote_and_encode_string(Codepoints, Acc);
%% Atoms are encoded as JSON strings of their name.
encode_noauto(Str, Acc) when is_atom(Str) ->
    quote_and_encode_string(atom_to_list(Str), Acc);
encode_noauto(Num, Acc) when is_number(Num) ->
    encode_number(Num, Acc);
encode_noauto({obj, Fields}, Acc) ->
    "}" ++ encode_object(Fields, "{" ++ Acc);
%% A dict is encoded the same way as an {obj, Proplist}.
encode_noauto(Dict, Acc) when element(1, Dict) =:= dict ->
    "}" ++ encode_object(dict:to_list(Dict), "{" ++ Acc);
%% Any other list is a JSON array. This clause must stay last so it
%% does not shadow the obj/dict clauses above; note that a plain
%% codepoint list therefore encodes as an array, not a string.
encode_noauto(Arr, Acc) when is_list(Arr) ->
    "]" ++ encode_array(Arr, "[" ++ Acc).

%% Emit comma-separated key:value members (reversed, onto Acc).
encode_object([], Acc) ->
    Acc;
encode_object([{Key, Value}], Acc) ->
    encode_field(Key, Value, Acc);
encode_object([{Key, Value} | Rest], Acc) ->
    encode_object(Rest, "," ++ encode_field(Key, Value, Acc)).

%% Object keys may be binaries (UTF-8), atoms, or codepoint lists.
encode_field(Key, Value, Acc) when is_binary(Key) ->
    Codepoints = xmerl_ucs:from_utf8(Key),
    encode_noauto(Value, ":" ++ quote_and_encode_string(Codepoints, Acc));
encode_field(Key, Value, Acc) when is_atom(Key) ->
    encode_noauto(Value, ":" ++ quote_and_encode_string(atom_to_list(Key), Acc));
encode_field(Key, Value, Acc) when is_list(Key) ->
    encode_noauto(Value, ":" ++ quote_and_encode_string(Key, Acc)).

%% Emit comma-separated array elements (reversed, onto Acc).
encode_array([], Acc) ->
    Acc;
encode_array([X], Acc) ->
    encode_noauto(X, Acc);
encode_array([X | Rest], Acc) ->
    encode_array(Rest, "," ++ encode_noauto(X, Acc)).
%% Wrap Str in double quotes and prepend its reversed, escaped encoding
%% onto Acc.
quote_and_encode_string(Str, Acc) ->
    "\"" ++ encode_string(Str, "\"" ++ Acc).

%% Escape '"' and '\'; control characters and codepoints > 127 are
%% delegated to encode_general_char/2.
encode_string([], Acc) ->
    Acc;
encode_string([$" | Rest], Acc) ->
    encode_string(Rest, [$", $\\ | Acc]);
encode_string([$\\ | Rest], Acc) ->
    encode_string(Rest, [$\\, $\\ | Acc]);
encode_string([X | Rest], Acc) when X < 32 orelse X > 127 ->
    encode_string(Rest, encode_general_char(X, Acc));
encode_string([X | Rest], Acc) ->
    encode_string(Rest, [X | Acc]).

%% Well-known control characters get their short escapes; codepoints
%% above 127 pass through raw (the caller is expected to Unicode-encode
%% the final result).
encode_general_char(8, Acc) -> [$b, $\\ | Acc];
encode_general_char(9, Acc) -> [$t, $\\ | Acc];
encode_general_char(10, Acc) -> [$n, $\\ | Acc];
encode_general_char(12, Acc) -> [$f, $\\ | Acc];
encode_general_char(13, Acc) -> [$r, $\\ | Acc];
encode_general_char(X, Acc) when X > 127 -> [X | Acc];
encode_general_char(X, Acc) ->
    %% FIXME currently this branch never runs.
    Utf16Bytes = xmerl_ucs:to_utf16be(X),
    encode_utf16be_chars(Utf16Bytes, Acc).

%% Emit \uXXXX escapes for UTF-16BE byte pairs (reversed, onto Acc).
encode_utf16be_chars([], Acc) ->
    Acc;
encode_utf16be_chars([B1, B2 | Rest], Acc) ->
    encode_utf16be_chars(Rest, [hex_digit((B2) band 16#F),
                                hex_digit((B2 bsr 4) band 16#F),
                                hex_digit((B1) band 16#F),
                                hex_digit((B1 bsr 4) band 16#F),
                                $u,
                                $\\ | Acc]).

%% @spec hex_digit(Nibble::integer()) -> char()
%% @doc Returns the character code corresponding to Nibble (0..15).
hex_digit(N) when is_integer(N), N >= 0, N =< 9 -> $0 + N;
hex_digit(N) when is_integer(N), N >= 10, N =< 15 -> $A + N - 10.

%% Numbers are rendered with the standard conversions, reversed onto Acc.
encode_number(Num, Acc) when is_integer(Num) ->
    lists:reverse(integer_to_list(Num), Acc);
encode_number(Num, Acc) when is_float(Num) ->
    lists:reverse(float_to_list(Num), Acc).
%% @spec decode(Input::(binary() | [byte()])) -> ({ok, json(), Remainder} | {error, Reason})
%% @doc Decodes JSON from Unicode-encoded text. Uses
%% {@link unicode_decode/1} on its input, which autodetects the
%% encoding and yields a list of codepoints, then parses those.
decode(Bin) when is_binary(Bin) ->
    decode(binary_to_list(Bin));
decode(Bytes) ->
    {_Charset, Codepoints} = unicode_decode(Bytes),
    decode_noauto(Codepoints).

%% @spec decode_noauto(Input::string()) -> ({ok, json(), string()} | {error, any()})
%% @doc As {@link decode/1}, but does not perform Unicode decoding on
%% its input. Expects a list of codepoints - an ordinary Erlang
%% string - rather than a list of Unicode-encoded bytes.
decode_noauto(Bin) when is_binary(Bin) ->
    decode_noauto(binary_to_list(Bin));
decode_noauto(Chars) ->
    %% parse/1 signals syntax errors via exit/1; the old-style catch
    %% turns those into {'EXIT', Reason} terms.
    case catch parse(skipws(Chars)) of
        {'EXIT', Reason} ->
            {error, Reason};
        {Value, Remaining} ->
            {ok, Value, skipws(Remaining)}
    end.
%% @spec unicode_decode([byte()]) -> {Charset, [char()]}
%% @doc Autodetects and decodes using the Unicode encoding of its input.
%%
%% From RFC4627, section 3, "Encoding":
%%
%%   JSON text SHALL be encoded in Unicode. The default encoding is
%%   UTF-8. Since the first two characters of a JSON text will always
%%   be ASCII characters, it is possible to determine whether an octet
%%   stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by
%%   looking at the pattern of nulls in the first four octets:
%%
%%     00 00 00 xx  UTF-32BE
%%     00 xx 00 xx  UTF-16BE
%%     xx 00 00 00  UTF-32LE
%%     xx 00 xx 00  UTF-16LE
%%     xx xx xx xx  UTF-8
%%
%% A BOM is honoured first, if present; otherwise the null-pattern
%% rules above apply (clause order matters). UTF-32 is treated the
%% same as UCS-4 for our purposes.
%%
%% NOTE(review): uses xmerl's UCS/UTF support, which works on lists;
%% asn1rt has another UTF-8 codec that works on binaries.
unicode_decode([0,0,254,255|C]) -> {'utf-32', xmerl_ucs:from_ucs4be(C)};
unicode_decode([255,254,0,0|C]) -> {'utf-32', xmerl_ucs:from_ucs4le(C)};
unicode_decode([254,255|C]) -> {'utf-16', xmerl_ucs:from_utf16be(C)};
%% Restored clause (present upstream): without it, input starting with
%% a UTF-16LE BOM (FF FE not followed by 00 00) fell through to the
%% UTF-8 fallback and was mis-decoded.
unicode_decode([255,254|C]) -> {'utf-16', xmerl_ucs:from_utf16le(C)};
unicode_decode([239,187,191|C]) -> {'utf-8', xmerl_ucs:from_utf8(C)};
unicode_decode(C=[0,0,_,_|_]) -> {'utf-32be', xmerl_ucs:from_ucs4be(C)};
unicode_decode(C=[_,_,0,0|_]) -> {'utf-32le', xmerl_ucs:from_ucs4le(C)};
unicode_decode(C=[0,_|_]) -> {'utf-16be', xmerl_ucs:from_utf16be(C)};
unicode_decode(C=[_,0|_]) -> {'utf-16le', xmerl_ucs:from_utf16le(C)};
unicode_decode(C=_) -> {'utf-8', xmerl_ucs:from_utf8(C)}.
%% @spec unicode_encode({Encoding, [char()]}) -> [byte()]
%% where Encoding = 'utf-32' | 'utf-32be' | 'utf-32le' | 'utf-16'
%%                | 'utf-16be' | 'utf-16le' | 'utf-8'
%% @doc Encodes the given characters to bytes using the given Unicode
%% encoding. The plain 'utf-32' / 'utf-16' variants prepend a
%% big-endian BOM; the explicit -be/-le variants emit no BOM, and
%% UTF-8 with BOM is not supported here.
unicode_encode({'utf-32', C}) -> [0,0,254,255|xmerl_ucs:to_ucs4be(C)];
unicode_encode({'utf-32be', C}) -> xmerl_ucs:to_ucs4be(C);
unicode_encode({'utf-32le', C}) -> xmerl_ucs:to_ucs4le(C);
unicode_encode({'utf-16', C}) -> [254,255|xmerl_ucs:to_utf16be(C)];
unicode_encode({'utf-16be', C}) -> xmerl_ucs:to_utf16be(C);
unicode_encode({'utf-16le', C}) -> xmerl_ucs:to_utf16le(C);
unicode_encode({'utf-8', C}) -> xmerl_ucs:to_utf8(C).
%% Dispatch on the first (already whitespace-skipped) character of the
%% input. Returns {Value, RemainingChars}; syntax errors are signalled
%% with exit/1 and caught by decode_noauto/1. Decoded strings are
%% returned as UTF-8 binaries.
parse([$" | Rest]) -> %% " emacs balancing
    {Codepoints, Rest1} = parse_string(Rest, []),
    {list_to_binary(xmerl_ucs:to_utf8(Codepoints)), Rest1};
parse("true" ++ Rest) -> {true, Rest};
parse("false" ++ Rest) -> {false, Rest};
parse("null" ++ Rest) -> {null, Rest};
parse([${ | Rest]) -> parse_object(skipws(Rest), []);
parse([$[ | Rest]) -> parse_array(skipws(Rest), []);
parse([]) -> exit(unexpected_end_of_input);
parse(Chars) -> parse_number(Chars, []).

%% Skip leading characters with codes =< 32; space, tab, CR, LF and
%% all other ASCII control characters count as whitespace here.
skipws([X | Rest]) when X =< 32 ->
    skipws(Rest);
skipws(Chars) ->
    Chars.
%% Accumulate decoded codepoints until the closing quote; returns
%% {Codepoints, RemainingChars}.
parse_string(Chars, Acc) ->
    case parse_codepoint(Chars) of
        {done, Rest} ->
            {lists:reverse(Acc), Rest};
        {ok, Codepoint, Rest} ->
            parse_string(Rest, [Codepoint | Acc])
    end.

%% Decode one string element: a closing quote ends the string, a
%% backslash starts an escape sequence, anything else is literal.
parse_codepoint([$" | Rest]) -> %% " emacs balancing
    {done, Rest};
parse_codepoint([$\\, Key | Rest]) ->
    parse_general_char(Key, Rest);
parse_codepoint([X | Rest]) ->
    {ok, X, Rest}.
%% Decode a backslash escape. The simple escapes map to their control
%% characters; \uXXXX yields a codepoint, with UTF-16 surrogate pairs
%% recombined into a single codepoint.
parse_general_char($b, Rest) -> {ok, 8, Rest};
parse_general_char($t, Rest) -> {ok, 9, Rest};
parse_general_char($n, Rest) -> {ok, 10, Rest};
parse_general_char($f, Rest) -> {ok, 12, Rest};
parse_general_char($r, Rest) -> {ok, 13, Rest};
parse_general_char($/, Rest) -> {ok, $/, Rest};
parse_general_char($\\, Rest) -> {ok, $\\, Rest};
parse_general_char($", Rest) -> {ok, $", Rest};
parse_general_char($u, [D0, D1, D2, D3 | Rest]) ->
    Codepoint =
        (digit_hex(D0) bsl 12) +
        (digit_hex(D1) bsl 8) +
        (digit_hex(D2) bsl 4) +
        (digit_hex(D3)),
    if
        %% High surrogate: the next escape must be a low surrogate;
        %% the pair is recombined via UTF-16BE decoding.
        %% NOTE(review): relies on xmerl_ucs:from_utf16be/1 accepting a
        %% binary argument - confirm on the target OTP release.
        Codepoint >= 16#D800 andalso Codepoint < 16#DC00 ->
            case parse_codepoint(Rest) of
                {low_surrogate_pair, Codepoint2, Rest1} ->
                    [FinalCodepoint] =
                        xmerl_ucs:from_utf16be(<<Codepoint:16/big-unsigned-integer,
                                                 Codepoint2:16/big-unsigned-integer>>),
                    {ok, FinalCodepoint, Rest1};
                _ ->
                    exit(incorrect_usage_of_surrogate_pair)
            end;
        %% Low surrogate: returned specially so the high-surrogate
        %% branch above can consume it.
        Codepoint >= 16#DC00 andalso Codepoint < 16#E000 ->
            {low_surrogate_pair, Codepoint, Rest};
        true ->
            {ok, Codepoint, Rest}
    end.
%% @spec digit_hex(Hexchar::char()) -> integer()
%% @doc Returns the number corresponding to Hexchar. Hexchar must be
%% one of the characters `$0' through `$9', `$A' through `$F', or
%% `$a' through `$f'.
digit_hex(C) when is_integer(C), C >= $0, C =< $9 -> C - $0;
digit_hex(C) when is_integer(C), C >= $A, C =< $F -> C - $A + 10;
digit_hex(C) when is_integer(C), C >= $a, C =< $f -> C - $a + 10.

%% Convert the accumulated (reversed) number text into an integer if
%% possible, otherwise a float.
finish_number(Acc, Rest) ->
    Str = lists:reverse(Acc),
    {case catch list_to_integer(Str) of
         {'EXIT', _} -> list_to_float(Str);
         Value -> Value
     end, Rest}.

%% Entry point for number parsing; Acc accumulates reversed text.
parse_number([$- | Rest], Acc) ->
    parse_number1(Rest, [$- | Acc]);
parse_number(Rest = [C | _], Acc) ->
    case is_digit(C) of
        true -> parse_number1(Rest, Acc);
        false -> exit(syntax_error)
    end.

%% Integer part, then optional fraction and exponent. Leading zeros
%% are accepted - the parser is deliberately lenient here.
parse_number1(Rest, Acc) ->
    {Acc1, Rest1} = parse_int_part(Rest, Acc),
    case Rest1 of
        [] -> finish_number(Acc1, []);
        [$. | More] ->
            {Acc2, Rest2} = parse_int_part(More, [$. | Acc1]),
            parse_exp(Rest2, Acc2, false);
        _ ->
            parse_exp(Rest1, Acc1, true)
    end.

%% Requires at least one character to be present.
parse_int_part(Chars = [_Ch | _Rest], Acc) ->
    parse_int_part0(Chars, Acc).

parse_int_part0([], Acc) ->
    {Acc, []};
parse_int_part0([Ch | Rest], Acc) ->
    case is_digit(Ch) of
        true -> parse_int_part0(Rest, [Ch | Acc]);
        false -> {Acc, [Ch | Rest]}
    end.

%% NeedFrac records that no fraction part was seen; see parse_exp1/3.
parse_exp([$e | Rest], Acc, NeedFrac) ->
    parse_exp1(Rest, Acc, NeedFrac);
parse_exp([$E | Rest], Acc, NeedFrac) ->
    parse_exp1(Rest, Acc, NeedFrac);
parse_exp(Rest, Acc, _NeedFrac) ->
    finish_number(Acc, Rest).

%% Erlang's list_to_float/1 requires a fraction part, so "1e5" is
%% rewritten as "1.0e5" before conversion.
parse_exp1(Rest, Acc, NeedFrac) ->
    {Acc1, Rest1} = parse_signed_int_part(Rest, if
                                                    NeedFrac -> [$e, $0, $. | Acc];
                                                    true -> [$e | Acc]
                                                end),
    finish_number(Acc1, Rest1).

%% Exponents may carry an explicit sign.
parse_signed_int_part([$+ | Rest], Acc) ->
    parse_int_part(Rest, [$+ | Acc]);
parse_signed_int_part([$- | Rest], Acc) ->
    parse_int_part(Rest, [$- | Acc]);
parse_signed_int_part(Rest, Acc) ->
    parse_int_part(Rest, Acc).

is_digit(N) when is_integer(N) -> N >= $0 andalso N =< $9;
is_digit(_) -> false.
%% Parse object members after the opening brace. A comma is simply
%% skipped as a separator, which makes the parser lenient about
%% leading or repeated commas. Keys must be quoted strings; the
%% pattern match on the expected $: aborts parsing (badmatch -> exit)
%% on malformed members.
parse_object([$} | Rest], Acc) ->
    {{obj, lists:reverse(Acc)}, Rest};
parse_object([$, | Rest], Acc) ->
    parse_object(skipws(Rest), Acc);
parse_object([$" | Rest], Acc) -> %% " emacs balancing
    {KeyCodepoints, Rest1} = parse_string(Rest, []),
    [$: | Rest2] = skipws(Rest1),
    {Value, Rest3} = parse(skipws(Rest2)),
    parse_object(skipws(Rest3), [{KeyCodepoints, Value} | Acc]).

%% Parse array elements after the opening bracket; commas are skipped
%% as separators, as above.
parse_array([$] | Rest], Acc) ->
    {lists:reverse(Acc), Rest};
parse_array([$, | Rest], Acc) ->
    parse_array(skipws(Rest), Acc);
parse_array(Chars, Acc) ->
    {Value, Rest} = parse(Chars),
    parse_array(skipws(Rest), [Value | Acc]).
%% @spec from_record(Record, atom(), [atom()]) -> jsonobj()
%% @doc Used by the `?RFC4627_FROM_RECORD' macro in `rfc4627.hrl'.
%% Given a record type definition of ``-record(myrecord, {field1, field2})''
%% and a value ``V = #myrecord{}'', the macro converts V to a JSON
%% "object" with one member per defined record field.
%% Convert record R (field names Fields, values starting at tuple
%% element 2) into a JSON object, omitting fields whose value is
%% 'undefined'.
from_record(R, _RecordName, Fields) ->
    {obj, encode_record_fields(R, 2, Fields)}.

%% Walk the record elements and field names in lock step; 'undefined'
%% values are skipped rather than emitted as JSON null.
encode_record_fields(_R, _Index, []) ->
    [];
encode_record_fields(R, Index, [Field | Rest]) ->
    Remaining = encode_record_fields(R, Index + 1, Rest),
    case element(Index, R) of
        undefined -> Remaining;
        Value -> [{atom_to_list(Field), Value} | Remaining]
    end.
%% @spec to_record(jsonobj(), DefaultValue::Record, [atom()]) -> Record
%% @doc Used by the `?RFC4627_TO_RECORD' macro in `rfc4627.hrl'.
%% Given a record type definition of ``-record(myrecord, {field1, field2})''
%% and a JSON "object" ``J = {obj, [{"field1", 123}, {"field2", 234}]}'',
%% the code ``?RFC4627_TO_RECORD(myrecord, J)''
%% will return a record ``#myrecord{field1 = 123, field2 = 234}''.
%% The macro expands to a call to the `to_record' function.
%% Build a record tuple from a JSON object: the record name comes from
%% Fallback, and each field takes the object's value for that field
%% name, or the corresponding Fallback element when absent.
to_record({obj, Values}, Fallback, Fields) ->
    RecordName = element(1, Fallback),
    list_to_tuple([RecordName | decode_record_fields(Values, Fallback, 2, Fields)]).

decode_record_fields(_Values, _Fallback, _Index, []) ->
    [];
decode_record_fields(Values, Fallback, Index, [Field | Rest]) ->
    Value = case lists:keyfind(atom_to_list(Field), 1, Values) of
                {_Key, Found} -> Found;
                false -> element(Index, Fallback)
            end,
    [Value | decode_record_fields(Values, Fallback, Index + 1, Rest)].
%% @spec exclude_field(jsonobj(), jsonkey()) -> jsonobj()
%% @doc Returns a new object with the named field removed.
%% Removes the first member with the given key (keydelete/3 deletes
%% only the first occurrence; duplicate keys later in the list remain).
exclude_field({obj, Props}, Key) ->
    {obj, lists:keydelete(Key, 1, Props)}.
%% @spec get_field(jsonobj(), jsonkey()) -> {ok, json()} | not_found
%% Look up Key in the object's member list; returns {ok, Value} for
%% the first match, or the atom 'not_found'.
get_field({obj, Props}, Key) ->
    case lists:keyfind(Key, 1, Props) of
        false ->
            not_found;
        {_K, Val} ->
            {ok, Val}
    end.
%% @spec get_field(jsonobj(), jsonkey(), json()) -> json()
%% @doc As get_field/2, but returns DefaultValue when the key is absent.
%% Lookup with a default: delegates to get_field/2 and substitutes
%% DefaultValue when the key is missing.
get_field(Obj, Key, DefaultValue) ->
    case get_field(Obj, Key) of
        {ok, Val} ->
            Val;
        not_found ->
            DefaultValue
    end.
%% @spec set_field(jsonobj(), jsonkey(), json()) -> jsonobj()
%% @doc Returns a new object with Key set to NewValue, as well
%% as all the unmodified fields from the first argument.
%% The new member is prepended; any previous first occurrence of Key
%% is removed (keydelete/3 deletes only the first match).
set_field({obj, Props}, Key, NewValue) ->
    {obj, [{Key, NewValue} | lists:keydelete(Key, 1, Props)]}.
%% @spec equiv(A::json(), B::json()) -> boolean()
%% @doc Tests JSON term equivalence. After the `equiv' predicate in
%% mochijson.
%% Structural equivalence: objects compare with member order ignored
%% (both key lists are sorted first), arrays compare element-wise in
%% order, and leaves compare with ==, so 1 and 1.0 are equivalent.
equiv({obj, Props1}, {obj, Props2}) ->
    L1 = lists:keysort(1, Props1),
    L2 = lists:keysort(1, Props2),
    equiv_sorted_plists(L1, L2);
equiv(A, B) when is_list(A) andalso is_list(B) ->
    equiv_arrays(A, B);
equiv(A, B) ->
    A == B.

%% Compare two key-sorted member lists pairwise.
equiv_sorted_plists([], []) -> true;
equiv_sorted_plists([], _) -> false;
equiv_sorted_plists(_, []) -> false;
equiv_sorted_plists([{K1, V1} | R1], [{K2, V2} | R2]) ->
    K1 == K2 andalso equiv(V1, V2) andalso equiv_sorted_plists(R1, R2).

%% Compare two arrays element-wise; lengths must match.
equiv_arrays([], []) -> true;
equiv_arrays([], _) -> false;
equiv_arrays(_, []) -> false;
equiv_arrays([V1 | R1], [V2 | R2]) ->
    equiv(V1, V2) andalso equiv_arrays(R1, R2).
|
0f2e3ddc61b4dd9e803d3996301559beeac25b9a098921841aa191e9cdf16e9f | reanimate/reanimate | LaTeX.hs | {-# LANGUAGE DeriveAnyClass #-}
# LANGUAGE DeriveGeneric #
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PatternSynonyms #-}
# LANGUAGE ScopedTypeVariables #
-- |
-- Copyright   : Written by David Himmelstrup
-- License     : Unlicense
-- Maintainer :
-- Stability : experimental
-- Portability : POSIX
module Reanimate.LaTeX
( latexCfg,
TexEngine (..),
TexConfig (..),
latex,
latexWithHeaders,
latexChunks,
latexCfgChunks,
latexCfgChunksTrans,
mathChunks,
xelatex,
xelatexWithHeaders,
ctex,
ctexWithHeaders,
latexAlign,
-- * Font configurations
chalkduster,
calligra,
noto,
helvet,
libertine,
biolinum,
droidSerif,
droidSans,
)
where
import Control.Lens ((&), (.~))
import Control.Monad.State (runState, state)
import qualified Data.ByteString as B
import Data.Foldable (Foldable (fold))
import Data.Hashable (Hashable)
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import qualified Data.Text.IO as T
import GHC.Generics (Generic)
import Graphics.SvgTree (Tree, clipPathRef, clipRule, mapTree, parseSvgFile,
pattern ClipPathTree, pattern None, strokeColor)
import Reanimate.Animation (SVG)
import Reanimate.Cache (cacheDiskSvg, cacheMem)
import Reanimate.External (zipArchive)
import Reanimate.Misc (requireExecutable, runCmd, withTempDir, withTempFile)
import Reanimate.Parameters (pNoExternals)
import Reanimate.Svg
import System.FilePath (replaceExtension, takeFileName, (</>))
import System.IO.Unsafe (unsafePerformIO)
-- | TeX backends. They have different features and capabilities.
data TexEngine = LaTeX | XeLaTeX | LuaLaTeX
  deriving (Generic, Hashable, Eq, Ord, Read, Show)

-- | TeX configurations can load packages and set up environments for tex scripts.
data TexConfig = TexConfig
  { -- | Which TeX binary renders the script.
    texConfigEngine :: TexEngine,
    -- | Extra preamble lines (e.g. @\\usepackage@ calls).
    texConfigHeaders :: [T.Text],
    -- | Lines inserted into the document body before the user's text.
    texConfigPostScript :: [T.Text]
  }
  deriving (Generic, Hashable, Read, Show, Eq, Ord)

-- | Plain latex with no extra headers and no body prefix.
defaultTexConfig :: TexConfig
defaultTexConfig = TexConfig LaTeX [] []
-- | Render TeX script using a given configuration.
latexCfg :: TexConfig -> T.Text -> SVG
latexCfg (TexConfig engine headers postscript) =
  gen postscript headers
  where
    -- Each engine pairs with its executable name, its intermediate
    -- file extension ("dvi"/"xdv"/"pdf") and any extra flags.
    gen =
      case engine of
        LaTeX -> someTexWithHeaders engine "latex" "dvi" []
        XeLaTeX -> someTexWithHeaders engine "xelatex" "xdv" ["-no-pdf"]
        LuaLaTeX -> someTexWithHeaders engine "lualatex" "pdf" []
-- | Invoke latex and import the result as an SVG object. SVG objects are
-- cached to improve performance.
--
-- Example:
--
-- > latex "$e^{i\\pi}+1=0$"
--
-- <<docs/gifs/doc_latex.gif>>
latex :: T.Text -> Tree
latex = latexWithHeaders []

-- | Invoke latex with extra script headers (inserted into the preamble).
latexWithHeaders :: [T.Text] -> T.Text -> Tree
latexWithHeaders = someTexWithHeaders LaTeX "latex" "dvi" [] []
-- Shared worker for all frontends. Arguments: engine tag, executable
-- name, intermediate-file extension ("dvi"/"xdv"/"pdf"), extra
-- command-line args, postscript lines (prepended to the document
-- body), header lines (preamble), and the TeX source itself.
someTexWithHeaders ::
  TexEngine ->
  String ->
  String ->
  [String] ->
  [T.Text] ->
  [T.Text] ->
  T.Text ->
  Tree
-- When externals are disabled, the TeX source is shown as a plain
-- text node instead of being rendered.
someTexWithHeaders _engine _exec _dvi _args _postscript _headers tex
  | pNoExternals = mkText tex
someTexWithHeaders engine exec dvi args postscript headers tex =
  -- unsafePerformIO wraps the memory+disk cache lookup; presumably the
  -- cache is keyed by the generated script text, so identical input is
  -- rendered only once -- see Reanimate.Cache to confirm.
  (unsafePerformIO . (cacheMem . cacheDiskSvg) (latexToSVG engine dvi exec args))
    script
  where
    script = mkTexScript exec args headers (T.unlines (postscript ++ [tex]))
-- | Invoke latex using a given configuration and separate results.
-- Apply the transformation to the LaTeX segments.
-- See also 'mathChunks', where the transformation is @(\s -> "$" <> s <> "$")@.
-- | Render each chunk separately while typesetting them as one
-- document, so glyph placement is consistent across chunks. The whole
-- concatenation (after applying @f@) is rendered once; the resulting
-- glyphs are then dealt out, each chunk claiming as many glyphs as
-- @f chunk@ renders on its own.
-- NOTE(review): assumes the glyph counts of the individual @f chunk@
-- renders sum to the glyph count of @f (fold chunks)@; otherwise
-- 'checkResult' fails with "latex chunk mismatch". Confirm for
-- transformations that add visible glyphs.
latexCfgChunksTrans :: Traversable t => TexConfig -> (T.Text -> T.Text) -> t T.Text -> t Tree
latexCfgChunksTrans _cfg f chunks | pNoExternals = fmap (mkText . f) chunks
latexCfgChunksTrans cfg f chunks = worker $ svgGlyphs $ tex $ f $ fold chunks
  where
    tex = latexCfg cfg
    -- Reassemble one chunk's glyphs into a single group.
    merge lst = mkGroup [fmt svg | (fmt, _, svg) <- lst]
    -- Every glyph of the full render must be consumed by some chunk.
    checkResult (r, []) = r
    checkResult (_, _) = error "latex chunk mismatch"
    worker = checkResult . runState (mapM (state . workerSingle) (fmap f chunks))
    -- Take this chunk's share of the glyph stream.
    workerSingle x everything =
      let width = length $ svgGlyphs (tex x)
          (first, rest) = splitAt width everything
      in (merge first, rest)
-- | Render math formula and separate results.
mathChunks :: Traversable t => t T.Text -> t Tree
mathChunks = latexCfgChunksTrans defaultTexConfig (\s -> "$" <> s <> "$")

-- | Invoke latex using a given configuration and separate results.
latexCfgChunks :: Traversable t => TexConfig -> t T.Text -> t Tree
latexCfgChunks cfg = latexCfgChunksTrans cfg id

-- | Invoke latex and separate the results, one 'Tree' per input chunk.
latexChunks :: Traversable t => t T.Text -> t Tree
latexChunks = latexCfgChunksTrans defaultTexConfig id
-- | Invoke xelatex and import the result as an SVG object. SVG objects are
cached to improve performance . Xelatex has support for non - western scripts .
xelatex :: Text -> Tree
xelatex = xelatexWithHeaders []

-- | Invoke xelatex with extra script headers. The @-no-pdf@ flag keeps
-- the xdv intermediate, which is then converted by dvisvgm (see
-- 'latexToSVG').
xelatexWithHeaders :: [T.Text] -> T.Text -> Tree
xelatexWithHeaders = someTexWithHeaders XeLaTeX "xelatex" "xdv" ["-no-pdf"] []
-- | Invoke xelatex with "\\usepackage[UTF8]{ctex}" and import the result as an
-- SVG object. SVG objects are cached to improve performance. Xelatex has
-- support for non-western scripts.
--
-- Example:
--
-- > ctex "中文"
--
-- <<docs/gifs/doc_ctex.gif>>
ctex :: T.Text -> Tree
ctex = ctexWithHeaders []

-- | Invoke xelatex with extra script headers plus the ctex (Chinese
-- TeX) package header.
ctexWithHeaders :: [T.Text] -> T.Text -> Tree
ctexWithHeaders headers = xelatexWithHeaders ("\\usepackage[UTF8]{ctex}" : headers)
-- | Invoke latex and import the result as an SVG object. SVG objects are
cached to improve performance . This wraps the TeX code in an ' align * '
-- context.
--
-- Example:
--
-- > latexAlign "R = \\frac{{\\Delta x}}{{kA}}"
--
-- <<docs/gifs/doc_latexAlign.gif>>
latexAlign :: Text -> Tree
latexAlign tex = latex wrapped
  where
    -- Wrap the user's TeX in an align* display-math environment.
    wrapped = T.unlines ["\\begin{align*}", tex, "\\end{align*}"]
-- Normalize the SVG produced by dvisvgm/pdf2svg: strip stroke colors,
-- drop clip paths, flatten transforms, and scale by 0.1 with the
-- y-axis mirrored (negative y scale).
postprocess :: Tree -> Tree
postprocess =
  simplify
    . lowerTransformations
    . scaleXY 0.1 (-0.1)
    . removeClipPaths
    . lowerIds
    . mapTree clearDrawAttr
  where
    clearDrawAttr t = t & strokeColor .~ Nothing

-- Engine-specific fixup: LuaLaTeX output is shifted up by its own
-- height; other engines need no adjustment.
enginePostprocess :: TexEngine -> Tree -> Tree
enginePostprocess LuaLaTeX svg = translate 0 (svgHeight svg) svg
enginePostprocess _ svg = svg

-- Remove clipPath definitions and clear clip attributes everywhere.
removeClipPaths :: SVG -> SVG
removeClipPaths = mapTree worker
  where
    worker ClipPathTree {} = None
    worker t = t & clipRule .~ Nothing & clipPathRef .~ Nothing
-- executable, arguments, header, tex
-- Run the TeX executable on the script, convert the intermediate
-- ("dvi"/"xdv" via dvisvgm, "pdf" via pdf2svg) to SVG, and parse the
-- result. All work happens in a temp directory; calls 'error' on
-- malformed SVG output.
latexToSVG :: TexEngine -> String -> String -> [String] -> Text -> IO Tree
latexToSVG engine dviExt latexExec latexArgs tex = do
  latexBin <- requireExecutable latexExec
  withTempDir $ \tmp_dir -> withTempFile "tex" $ \tex_file ->
    withTempFile "svg" $ \svg_file -> do
      let dvi_file =
            tmp_dir </> replaceExtension (takeFileName tex_file) dviExt
      B.writeFile tex_file (T.encodeUtf8 tex)
      runCmd
        latexBin
        ( latexArgs
            ++ [ "-interaction=nonstopmode",
                 "-halt-on-error",
                 "-output-directory=" ++ tmp_dir,
                 tex_file
               ]
        )
      if dviExt == "pdf"
        then do
          pdf2svg <- requireExecutable "pdf2svg"
          runCmd
            pdf2svg
            [dvi_file, svg_file]
        else do
          dvisvgm <- requireExecutable "dvisvgm"
          runCmd
            dvisvgm
            [ dvi_file,
              "--precision=5", -- better bboxes.
              "--no-fonts", -- use glyphs instead of fonts.
              "--verbosity=0",
              "-o",
              svg_file
            ]
      svg_data <- T.readFile svg_file
      case parseSvgFile svg_file svg_data of
        Nothing -> error "Malformed svg"
        Just svg ->
          return $
            enginePostprocess engine $
              postprocess $ unbox $ replaceUses svg
-- Assemble the standalone LaTeX document fed to the TeX engine: a
-- comment recording the exact command line, a fixed preamble, the
-- caller's extra headers, and the document body.
mkTexScript :: String -> [String] -> [Text] -> Text -> Text
mkTexScript latexExec latexArgs texHeaders tex =
  T.unlines $
    concat
      [ [ "% " <> T.pack (unwords (latexExec : latexArgs)),
          "\\documentclass[preview]{standalone}",
          "\\usepackage{amsmath}",
          "\\usepackage{gensymb}"
        ],
        texHeaders,
        [ "\\usepackage[english]{babel}",
          "\\linespread{1}",
          "\\begin{document}",
          tex,
          "\\end{document}"
        ]
      ]
{- Packages used by manim.
\\\usepackage{amsmath}\n\
\\\usepackage{amssymb}\n\
\\\usepackage{dsfont}\n\
\\\usepackage{setspace}\n\
\\\usepackage{relsize}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{mathrsfs}\n\
\\\usepackage{calligra}\n\
\\\usepackage{wasysym}\n\
\\\usepackage{ragged2e}\n\
\\\usepackage{physics}\n\
\\\usepackage{xcolor}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{xfrac}\n\
\\\usepackage{microtype}\n\
\\\usepackage{amsmath}\n\
\\\usepackage{amssymb}\n\
\\\usepackage{dsfont}\n\
\\\usepackage{setspace}\n\
\\\usepackage{relsize}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{mathrsfs}\n\
\\\usepackage{calligra}\n\
\\\usepackage{wasysym}\n\
\\\usepackage{ragged2e}\n\
\\\usepackage{physics}\n\
\\\usepackage{xcolor}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{xfrac}\n\
\\\usepackage{microtype}\n\
-}
-- | Chalkduster font. Depends on lualatex.
-- Font files are automatically downloaded.
--
-- @
-- `latexCfg` `chalkduster` "chalkduster"
-- @
--
-- <<docs/gifs/doc_chalkduster.gif>>
-- NOTE(review): the header comment above says this depends on
-- lualatex, but the engine configured here is XeLaTeX -- confirm
-- which one is intended.
chalkduster :: TexConfig
chalkduster =
  TexConfig
    { texConfigEngine = XeLaTeX,
      texConfigHeaders =
        [ "\\usepackage[no-math]{fontspec}",
          "\\setmainfont[Mapping=tex-text,Path={" <> chalkdusterFont <> "/},Extension=.ttf]{Chalkduster}",
          "\\usepackage[defaultmathsizes]{mathastext}"
        ],
      texConfigPostScript = []
    }
  where
    -- Local path of the downloaded font archive.
    -- NOTE(review): the URL argument to zipArchive below is the empty
    -- string, which looks like lost content; presumably a real
    -- download URL is required here -- restore it before use.
    chalkdusterFont =
      T.pack $
        zipArchive
          ""
          "Wplv4RjuFiI0hDQnAM5MVHl2evrZqWstRLdVAfBomCM="
-- |
-- @
-- `latexCfg` `calligra` "calligra"
-- @
--
-- <<docs/gifs/doc_calligra.gif>>
calligra :: TexConfig
calligra =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{calligra}"],
      -- Switch the body text to the calligra face.
      texConfigPostScript = ["\\calligra"]
    }

-- | Noto font, loaded via the @noto@ package.
--
-- @
-- `latexCfg` `noto` "noto"
-- @
--
-- <<docs/gifs/doc_noto.gif>>
noto :: TexConfig
noto =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{noto}"],
      texConfigPostScript = []
    }

-- | Helvetica-like font, loaded via the @helvet@ package.
--
-- @
-- `latexCfg` `helvet` "helvet"
-- @
--
-- <<docs/gifs/doc_helvet.gif>>
helvet :: TexConfig
helvet =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{helvet}"],
      texConfigPostScript = []
    }

-- | Linux Libertine font, loaded via the @libertine@ package.
--
-- @
-- `latexCfg` `libertine` "libertine"
-- @
--
-- <<docs/gifs/doc_libertine.gif>>
libertine :: TexConfig
libertine =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{libertine}"],
      texConfigPostScript = []
    }

-- | Linux Biolinum: the libertine package with the sans-serif family
-- made the default.
--
-- @
-- `latexCfg` `biolinum` "biolinum"
-- @
--
-- <<docs/gifs/doc_biolinum.gif>>
biolinum :: TexConfig
biolinum =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders =
        ["\\usepackage{libertine}"
        ,"\\renewcommand{\\familydefault}{\\sfdefault}"],
      texConfigPostScript = []
    }

-- | Droid Serif font, loaded via the @droidserif@ package.
--
-- @
-- `latexCfg` `droidSerif` "droidSerif"
-- @
--
-- <<docs/gifs/doc_droidSerif.gif>>
droidSerif :: TexConfig
droidSerif =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders =
        ["\\usepackage[default]{droidserif}"
        -- The droidserif package lacks \varepsilon; fall back to \epsilon.
        ,"\\let\\varepsilon\\epsilon"],
      texConfigPostScript = []
    }

-- | Droid Sans font, loaded via the @droidsans@ package.
--
-- @
-- `latexCfg` `droidSans` "droidSans"
-- @
--
-- <<docs/gifs/doc_droidSans.gif>>
droidSans :: TexConfig
droidSans =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders =
        ["\\usepackage[default]{droidsans}"
        -- Same \varepsilon fallback as droidSerif.
        ,"\\let\\varepsilon\\epsilon"],
      texConfigPostScript = []
    }
| null | https://raw.githubusercontent.com/reanimate/reanimate/5ea023980ff7f488934d40593cc5069f5fd038b0/src/Reanimate/LaTeX.hs | haskell | # LANGUAGE DeriveAnyClass #
# LANGUAGE OverloadedStrings #
# LANGUAGE PatternSynonyms #
|
Maintainer :
Stability : experimental
Portability : POSIX
* Font configurations
| TeX backends. They have different features and capabilities.
| TeX configurations can load packages and set up environments for tex scripts.
| Render TeX script using a given configuration.
| Invoke latex and import the result as an SVG object. SVG objects are
cached to improve performance.
Example:
<<docs/gifs/doc_latex.gif>>
| Invoke latex with extra script headers.
| Invoke latex using a given configuration and separate results.
| Render math formula and separate results.
| Invoke latex using a given configuration and separate results.
| Invoke latex and separate results.
| Invoke xelatex and import the result as an SVG object. SVG objects are
| Invoke xelatex with extra script headers.
support for non-western scripts.
Example:
> ctex "中文"
<<docs/gifs/doc_ctex.gif>>
| Invoke xelatex with extra script headers + ctex headers.
| Invoke latex and import the result as an SVG object. SVG objects are
context.
Example:
> latexAlign "R = \\frac{{\\Delta x}}{{kA}}"
<<docs/gifs/doc_latexAlign.gif>>
executable, arguments, header, tex
use glyphs instead of fonts.
| Chalkduster font. Depends on lualatex.
@
`latexCfg` `chalkduster` "chalkduster"
@
<<docs/gifs/doc_chalkduster.gif>>
|
@
@
<<docs/gifs/doc_calligra.gif>>
|
@
`latexCfg` `noto` "noto"
@
<<docs/gifs/doc_noto.gif>>
|
@
`latexCfg` `helvet` "helvet"
@
<<docs/gifs/doc_helvet.gif>>
|
@
`latexCfg` `libertine` "libertine"
@
<<docs/gifs/doc_libertine.gif>>
|
@
`latexCfg` `biolinum` "biolinum"
@
<<docs/gifs/doc_biolinum.gif>>
|
@
`latexCfg` `droidSerif` "droidSerif"
@
<<docs/gifs/doc_droidSerif.gif>>
|
@
`latexCfg` `droidSans` "droidSans"
@
<<docs/gifs/doc_droidSans.gif>> | # LANGUAGE DeriveGeneric #
# LANGUAGE ScopedTypeVariables #
Copyright : Written by
License :
module Reanimate.LaTeX
( latexCfg,
TexEngine (..),
TexConfig (..),
latex,
latexWithHeaders,
latexChunks,
latexCfgChunks,
latexCfgChunksTrans,
mathChunks,
xelatex,
xelatexWithHeaders,
ctex,
ctexWithHeaders,
latexAlign,
chalkduster,
calligra,
noto,
helvet,
libertine,
biolinum,
droidSerif,
droidSans,
)
where
import Control.Lens ((&), (.~))
import Control.Monad.State (runState, state)
import qualified Data.ByteString as B
import Data.Foldable (Foldable (fold))
import Data.Hashable (Hashable)
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import qualified Data.Text.IO as T
import GHC.Generics (Generic)
import Graphics.SvgTree (Tree, clipPathRef, clipRule, mapTree, parseSvgFile,
pattern ClipPathTree, pattern None, strokeColor)
import Reanimate.Animation (SVG)
import Reanimate.Cache (cacheDiskSvg, cacheMem)
import Reanimate.External (zipArchive)
import Reanimate.Misc (requireExecutable, runCmd, withTempDir, withTempFile)
import Reanimate.Parameters (pNoExternals)
import Reanimate.Svg
import System.FilePath (replaceExtension, takeFileName, (</>))
import System.IO.Unsafe (unsafePerformIO)
data TexEngine = LaTeX | XeLaTeX | LuaLaTeX
deriving (Generic, Hashable, Eq, Ord, Read, Show)
data TexConfig = TexConfig
{ texConfigEngine :: TexEngine,
texConfigHeaders :: [T.Text],
texConfigPostScript :: [T.Text]
}
deriving (Generic, Hashable, Read, Show, Eq, Ord)
defaultTexConfig :: TexConfig
defaultTexConfig = TexConfig LaTeX [] []
latexCfg :: TexConfig -> T.Text -> SVG
latexCfg (TexConfig engine headers postscript) =
gen postscript headers
where
gen =
case engine of
LaTeX -> someTexWithHeaders engine "latex" "dvi" []
XeLaTeX -> someTexWithHeaders engine "xelatex" "xdv" ["-no-pdf"]
LuaLaTeX -> someTexWithHeaders engine "lualatex" "pdf" []
> latex " $ e^{i\\pi}+1=0 $ "
latex :: T.Text -> Tree
latex = latexWithHeaders []
latexWithHeaders :: [T.Text] -> T.Text -> Tree
latexWithHeaders = someTexWithHeaders LaTeX "latex" "dvi" [] []
someTexWithHeaders ::
TexEngine ->
String ->
String ->
[String] ->
[T.Text] ->
[T.Text] ->
T.Text ->
Tree
someTexWithHeaders _engine _exec _dvi _args _postscript _headers tex
| pNoExternals = mkText tex
someTexWithHeaders engine exec dvi args postscript headers tex =
(unsafePerformIO . (cacheMem . cacheDiskSvg) (latexToSVG engine dvi exec args))
script
where
script = mkTexScript exec args headers (T.unlines (postscript ++ [tex]))
Apply the transformation to the LaTeX segments .
See also ' ' , the transformation is @(\s - > " $ " < > s < > " $ " ) @.
latexCfgChunksTrans :: Traversable t => TexConfig -> (T.Text -> T.Text) -> t T.Text -> t Tree
latexCfgChunksTrans _cfg f chunks | pNoExternals = fmap (mkText . f) chunks
latexCfgChunksTrans cfg f chunks = worker $ svgGlyphs $ tex $ f $ fold chunks
where
tex = latexCfg cfg
merge lst = mkGroup [fmt svg | (fmt, _, svg) <- lst]
checkResult (r, []) = r
checkResult (_, _) = error "latex chunk mismatch"
worker = checkResult . runState (mapM (state . workerSingle) (fmap f chunks))
workerSingle x everything =
let width = length $ svgGlyphs (tex x)
(first, rest) = splitAt width everything
in (merge first, rest)
mathChunks :: Traversable t => t T.Text -> t Tree
mathChunks = latexCfgChunksTrans defaultTexConfig (\s -> "$" <> s <> "$")
latexCfgChunks :: Traversable t => TexConfig -> t T.Text -> t Tree
latexCfgChunks cfg = latexCfgChunksTrans cfg id
latexChunks :: Traversable t => t T.Text -> t Tree
latexChunks = latexCfgChunksTrans defaultTexConfig id
cached to improve performance . Xelatex has support for non - western scripts .
xelatex :: Text -> Tree
xelatex = xelatexWithHeaders []
xelatexWithHeaders :: [T.Text] -> T.Text -> Tree
xelatexWithHeaders = someTexWithHeaders XeLaTeX "xelatex" "xdv" ["-no-pdf"] []
| Invoke xelatex with " } " and import the result as an
SVG object . SVG objects are cached to improve performance . Xelatex has
ctex :: T.Text -> Tree
ctex = ctexWithHeaders []
ctexWithHeaders :: [T.Text] -> T.Text -> Tree
ctexWithHeaders headers = xelatexWithHeaders ("\\usepackage[UTF8]{ctex}" : headers)
cached to improve performance . This wraps the TeX code in an ' align * '
latexAlign :: Text -> Tree
latexAlign tex = latex $ T.unlines ["\\begin{align*}", tex, "\\end{align*}"]
postprocess :: Tree -> Tree
postprocess =
simplify
. lowerTransformations
. scaleXY 0.1 (-0.1)
. removeClipPaths
. lowerIds
. mapTree clearDrawAttr
where
clearDrawAttr t = t & strokeColor .~ Nothing
enginePostprocess :: TexEngine -> Tree -> Tree
enginePostprocess LuaLaTeX svg = translate 0 (svgHeight svg) svg
enginePostprocess _ svg = svg
removeClipPaths :: SVG -> SVG
removeClipPaths = mapTree worker
where
worker ClipPathTree {} = None
worker t = t & clipRule .~ Nothing & clipPathRef .~ Nothing
-- | Compile a TeX document with the given engine and import the result
--   as an SVG.  The document is rendered into a temporary directory and
--   converted with @pdf2svg@ (when the engine emits PDF) or @dvisvgm@
--   (DVI/XDV output).  Throws via 'error' on malformed SVG output.
latexToSVG :: TexEngine -> String -> String -> [String] -> Text -> IO Tree
latexToSVG engine dviExt latexExec latexArgs tex = do
  latexBin <- requireExecutable latexExec
  withTempDir $ \tmp_dir -> withTempFile "tex" $ \tex_file ->
    withTempFile "svg" $ \svg_file -> do
      let dvi_file =
            tmp_dir </> replaceExtension (takeFileName tex_file) dviExt
      B.writeFile tex_file (T.encodeUtf8 tex)
      runCmd
        latexBin
        ( latexArgs
            ++ [ "-interaction=nonstopmode",
                 "-halt-on-error",
                 "-output-directory=" ++ tmp_dir,
                 tex_file
               ]
        )
      if dviExt == "pdf"
        then do
          pdf2svg <- requireExecutable "pdf2svg"
          runCmd
            pdf2svg
            [dvi_file, svg_file]
        else do
          dvisvgm <- requireExecutable "dvisvgm"
          runCmd
            dvisvgm
            [ dvi_file,
              -- higher precision gives better bboxes; this stray text
              -- previously sat un-commented inside the list and broke
              -- the argument list.
              "--precision=5",
              "--verbosity=0",
              "-o",
              svg_file
            ]
      svg_data <- T.readFile svg_file
      case parseSvgFile svg_file svg_data of
        Nothing -> error "Malformed svg"
        Just svg ->
          return $
            enginePostprocess engine $
              postprocess $ unbox $ replaceUses svg
-- | Assemble a minimal standalone LaTeX document around the user's TeX
--   snippet.  The first line records the exact compiler invocation as a
--   TeX comment, which makes the generated script self-describing (and
--   distinguishes otherwise-identical documents for caching).
mkTexScript :: String -> [String] -> [Text] -> Text -> Text
mkTexScript latexExec latexArgs texHeaders tex =
  T.unlines (preamble ++ texHeaders ++ body)
  where
    preamble =
      [ "% " <> T.pack (unwords (latexExec : latexArgs)),
        "\\documentclass[preview]{standalone}",
        "\\usepackage{amsmath}",
        "\\usepackage{gensymb}"
      ]
    body =
      [ "\\usepackage[english]{babel}",
        "\\linespread{1}",
        "\\begin{document}",
        tex,
        "\\end{document}"
      ]
{- Packages used by manim:
\\\usepackage{amsmath}\n\
\\\usepackage{amssymb}\n\
\\\usepackage{dsfont}\n\
\\\usepackage{setspace}\n\
\\\usepackage{relsize}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{mathrsfs}\n\
\\\usepackage{calligra}\n\
\\\usepackage{wasysym}\n\
\\\usepackage{ragged2e}\n\
\\\usepackage{physics}\n\
\\\usepackage{xcolor}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{xfrac}\n\
\\\usepackage{microtype}\n\
\\\usepackage{amsmath}\n\
\\\usepackage{amssymb}\n\
\\\usepackage{dsfont}\n\
\\\usepackage{setspace}\n\
\\\usepackage{relsize}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{mathrsfs}\n\
\\\usepackage{calligra}\n\
\\\usepackage{wasysym}\n\
\\\usepackage{ragged2e}\n\
\\\usepackage{physics}\n\
\\\usepackage{xcolor}\n\
\\\usepackage{textcomp}\n\
\\\usepackage{xfrac}\n\
\\\usepackage{microtype}\n\
-}
-- | TexConfig for rendering text in the Chalkduster font.  The font
--   files are automatically downloaded.
-- | TexConfig that typesets everything in the Chalkduster TTF font via
--   fontspec/mathastext, using XeLaTeX (required for fontspec).
chalkduster :: TexConfig
chalkduster =
  TexConfig
    { texConfigEngine = XeLaTeX,
      texConfigHeaders =
        [ "\\usepackage[no-math]{fontspec}",
          "\\setmainfont[Mapping=tex-text,Path={" <> chalkdusterFont <> "/},Extension=.ttf]{Chalkduster}",
          "\\usepackage[defaultmathsizes]{mathastext}"
        ],
      texConfigPostScript = []
    }
  where
    -- Local path of the unpacked font archive.
    chalkdusterFont =
      T.pack $
        zipArchive
          -- NOTE(review): the archive URL below is empty -- the original
          -- download URL appears to have been lost; the SHA-256 key alone
          -- cannot locate the archive.  TODO restore the URL.
          ""
          "Wplv4RjuFiI0hDQnAM5MVHl2evrZqWstRLdVAfBomCM="
-- | Usage example: @latexCfg calligra "..."@
-- | TexConfig for the calligra calligraphic font.  The package is loaded
--   in the preamble and @\\calligra@ is prepended to each rendered
--   snippet so the glyphs actually use the font.
calligra :: TexConfig
calligra =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{calligra}"],
      texConfigPostScript = ["\\calligra"]
    }
-- | TexConfig using the Noto font family.
noto :: TexConfig
noto =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{noto}"],
      texConfigPostScript = []
    }

-- | TexConfig using the Helvetica (helvet) font.
helvet :: TexConfig
helvet =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{helvet}"],
      texConfigPostScript = []
    }

-- | TexConfig using the Linux Libertine font.
libertine :: TexConfig
libertine =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders = ["\\usepackage{libertine}"],
      texConfigPostScript = []
    }
-- | TexConfig using Linux Biolinum: the libertine package with the
--   sans-serif family selected as default.
biolinum :: TexConfig
biolinum =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders =
        ["\\usepackage{libertine}"
        ,"\\renewcommand{\\familydefault}{\\sfdefault}"],
      texConfigPostScript = []
    }

-- | TexConfig using the Droid Serif font.  @\\varepsilon@ is aliased to
--   @\\epsilon@ because the font lacks the variant glyph.
droidSerif :: TexConfig
droidSerif =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders =
        ["\\usepackage[default]{droidserif}"
        ,"\\let\\varepsilon\\epsilon"],
      texConfigPostScript = []
    }

-- | TexConfig using the Droid Sans font.  Same @\\varepsilon@ aliasing
--   as 'droidSerif'.
droidSans :: TexConfig
droidSans =
  TexConfig
    { texConfigEngine = LaTeX,
      texConfigHeaders =
        ["\\usepackage[default]{droidsans}"
        ,"\\let\\varepsilon\\epsilon"],
      texConfigPostScript = []
    }
|
a4e4a0e3f080424b74756d5f3bbc9e96bb59d4fd0e7d61ad4455e1225e2123e2 | yedi/rhyme-finder | core_test.clj | (ns rhyme-finder.core-test
(:use clojure.test
rhyme-finder.core))
(deftest a-test
(testing "FIXME, I fail."
(is (= 0 1)))) | null | https://raw.githubusercontent.com/yedi/rhyme-finder/c2f994606794e16361f04b03950113ce82a4e090/test/rhyme_finder/core_test.clj | clojure | (ns rhyme-finder.core-test
(:use clojure.test
rhyme-finder.core))
;; Default placeholder test generated by the Leiningen project template;
;; it fails on purpose ("FIXME") until real tests are written.
(deftest a-test
  (testing "FIXME, I fail."
    (is (= 0 1))))
791f484508e16442f39d49c38a5cec35b0b75b354ffc71566529da46855455ea | ilevd/hicosql | core_test.clj | (ns hicosql.core-test
(:require [clojure.test :refer :all]
[hicosql.core :refer :all]))
;; Previously the closing paren of (= ...) was misplaced: (= (run-file ...))
;; had a single argument (always true) and the expected map was silently
;; treated as the `is` failure message, so the test asserted nothing.
(deftest run-file-test
  (testing "__include test"
    (is (= (run-file "../test/data/main.sql")
           (flatland.ordered.map/ordered-map
            :a 300
            :a2 5
            :b 10
            :c 20
            :main_req "hello world")))))
| null | https://raw.githubusercontent.com/ilevd/hicosql/0393ce9ccd3b77f6b96376545ab1db4d66c2acd5/test/hicosql/core_test.clj | clojure | (ns hicosql.core-test
(:require [clojure.test :refer :all]
[hicosql.core :refer :all]))
;; Previously the closing paren of (= ...) was misplaced: (= (run-file ...))
;; had a single argument (always true) and the expected map was silently
;; treated as the `is` failure message, so the test asserted nothing.
(deftest run-file-test
  (testing "__include test"
    (is (= (run-file "../test/data/main.sql")
           (flatland.ordered.map/ordered-map
            :a 300
            :a2 5
            :b 10
            :c 20
            :main_req "hello world")))))
| |
1a25d74127462f433b4e3a0da4b19f54c64fafbb9dd0f440b506fe5b0af1e633 | tobyhede/lein-git-deps | git_deps.clj | (ns leiningen.git-deps
"How this works: It clones projects into .lein-git-deps/<whatever>.
If the directory already exists, it does a git pull and git checkout."
(:require [clojure.java.shell :as sh]
[clojure.java.io :as io]
[clojure.string :as string]
[robert.hooke :as hooke]
[leiningen.deps :as deps]
[leiningen.core.project :as lein-project]))
;; Why, you might ask, are we using str here instead of simply def'ing
;; the var to a string directly? The answer is that we are working
;; around a bug in marginalia where it can't tell the difference
;; between the string that's the value for a def and a docstring. It
;; will hopefully be fixed RSN, because this makes us feel dirty.
(def ^{:private true
       :doc "The directory into which dependencies will be cloned."}
  ;; The (str ...) wrapper is deliberate -- see the marginalia note above.
  git-deps-dir (str ".lein-git-deps"))
(defn- directory-exists?
  "True when dir (anything accepted by clojure.java.io/file) names an
  existing directory."
  [dir]
  (let [f (io/file dir)]
    (.isDirectory f)))
(defn- default-clone-dir
  "Given a git URL, return the directory name git would clone into by
  default: the last path segment with its final extension (usually
  \".git\") stripped.  A segment without any extension is returned
  unchanged; previously such URLs collapsed to an empty string."
  [uri]
  (let [segment (last (string/split uri #"/"))
        parts   (string/split segment #"\.")]
    (if (next parts)
      (string/join "." (butlast parts))
      segment)))
(defn- exec
  "Run a shell command via clojure.java.shell/sh and return its result
  map.  Throws an Exception carrying the command line, exit code and
  captured output whenever the command exits non-zero."
  [& args]
  (let [{:keys [exit out err] :as result} (apply sh/sh args)]
    (when-not (zero? exit)
      (throw
       (Exception.
        (format "Command %s failed with exit code %s\n%s\n%s"
                (apply str (interpose " " args))
                exit
                out
                err))))
    result))
(defn- git-clone
  "Clone the git repository at url into dir-name, running git in
  working-dir."
  [url dir-name working-dir]
  ;; The previous (apply exec (remove nil? [...])) never saw a nil
  ;; element, so the indirection was dead code; call exec directly.
  (exec "git" "clone" url (str dir-name) :dir working-dir))
(defn- git-checkout
  "Check out the specified commit in dir."
  [commit dir]
  ;; (str dir) because dir may be a java.io.File rather than a string.
  (println "Running git checkout " commit " in " (str dir))
  (exec "git" "checkout" commit :dir dir))

(defn- git-submodule-init
  "Initialize submodules in the given dir."
  [dir]
  (println "Running git submodule init in " (str dir))
  (exec "git" "submodule" "init" :dir dir))

(defn- git-submodule-update
  "Update submodules in the given dir."
  [dir]
  (println "Running git submodule update in " (str dir))
  (exec "git" "submodule" "update" :dir dir))
(defn- detached-head?
  "Return true if the git repository in dir has HEAD detached.
  Inspects 'git branch' output: the current branch is the line marked
  with a leading *."
  [dir]
  (let [branches (:out (exec "git" "branch" "--no-color" :dir dir))
        current  (some (fn [line] (when (.startsWith line "*") line))
                       (string/split-lines branches))]
    (when-not current
      (throw (Exception. "Unable to determine current branch")))
    (or (= current "* (no branch)")
        (.startsWith current "* (detached"))))
(defn- git-pull
  "Run 'git-pull' in directory dir, but only if we're on a branch. If
  HEAD is detached, we only do a fetch, not a full pull."
  [dir]
  (println "Running git pull on " (str dir))
  (if (detached-head? dir)
    (do
      ;; 'git pull' has no branch to merge into when HEAD is detached,
      ;; so only refresh the remote-tracking refs.
      (println "Not on a branch, so fetching instead of pulling.")
      (exec "git" "fetch" :dir dir))
    (exec "git" "pull" :dir dir)))
(defn- git-rev-parse
  "Resolve rev with 'git rev-parse --revs-only' in dir and return the
  command's stdout (empty string when the revision is unknown)."
  [rev dir]
  (-> (exec "git" "rev-parse" "--revs-only" rev :dir dir)
      :out))
(defn- git-dependencies
  "Normalize the project's :git-dependencies entries into maps with keys
  :dep :dep-url :commit :clone-dir-name :clone-dir :src, filling in the
  defaults (commit \"master\", clone dir derived from the URL, source
  subdirectory \"src\") when omitted."
  [project]
  (for [[dep-url commit opts :as dep] (:git-dependencies project)]
    (let [{clone-dir-name :dir src :src} opts
          commit (or commit "master")
          clone-dir-name (or clone-dir-name (default-clone-dir dep-url))]
      {:dep dep
       :dep-url dep-url
       :commit commit
       :clone-dir-name clone-dir-name
       :clone-dir (io/file git-deps-dir clone-dir-name)
       :src (or src "src")})))
(defn- needs-update?
  "True unless the clone's HEAD already resolves to the requested
  commit, i.e. both revisions resolved to non-empty ids and are equal."
  [clone-dir commit]
  (let [head (git-rev-parse "HEAD" clone-dir)
        need (git-rev-parse commit clone-dir)]
    (not (and (seq head) (seq need) (= head need)))))
(defn- checkout-repo
  "Performs the actual checkout steps: check out the requested commit,
  then bring any submodules in line with it."
  [clone-dir commit]
  (git-checkout commit clone-dir)
  (git-submodule-init clone-dir)
  (git-submodule-update clone-dir))
(defn- setup-git-repo
  "Clone or refresh a single git dependency.

  dep is a map as produced by git-dependencies.  An optional mode
  argument may be :force-update, in which case an existing clone is
  pulled unconditionally; otherwise it is only pulled when needs-update?
  reports that HEAD differs from the requested commit.  A missing clone
  is always created and checked out."
  [dep & [mode]]
  (when-not (directory-exists? git-deps-dir)
    (.mkdir (io/file git-deps-dir)))
  (let [{:keys [dep-url commit clone-dir-name clone-dir]} dep
        fresh-clone? (not (directory-exists? clone-dir))]
    (cond
      fresh-clone?
      (do (git-clone dep-url clone-dir-name git-deps-dir)
          (checkout-repo clone-dir commit))

      (or (= mode :force-update) (needs-update? clone-dir commit))
      (do (git-pull clone-dir)
          (checkout-repo clone-dir commit)))))
(defn git-deps
  "A leiningen task that pulls dependencies in via git.

  Dependencies are listed in project.clj under :git-dependencies, each
  entry taking one of three forms:

    [\"<git-url>\"]
    [\"<git-url>\" \"<ref>\"]   ; any 'git checkout' target (commit, branch)
    [\"<git-url>\" \"<ref>\" {:dir \"alternate-clone-dir\"
                              :src \"alternate-src-subdir\"}]

  Every listed dependency is cloned (or force-updated) and checked out."
  [project]
  (doseq [{dep-str :dep :as dep} (git-dependencies project)]
    (println "Setting up dependency for " dep-str)
    (setup-git-repo dep :force-update)))
(defn hooks
  "Called by leiningen via lein-git-deps.plugin/hooks.  Wraps the
  built-in deps task so git dependencies are fetched right after it
  finishes."
  []
  (letfn [(deps-then-git [task & args]
            (apply task args)
            (git-deps (first args)))]
    (hooke/add-hook #'deps/deps deps-then-git)))
(defn- add-source-paths
  "Append the dependency's source directory -- its clone directory
  joined with its :src subdirectory -- to the project's :source-paths."
  [project dep]
  (let [clone-path (.getAbsolutePath (:clone-dir dep))
        dep-src    (str clone-path "/" (:src dep))]
    (update-in project [:source-paths] conj dep-src)))
(defn- add-dependencies
  "Given a project and a dependency map (dep), adds all of the dependency's
  own :dependencies (read from its project.clj) to the main project."
  [project dep]
  (let [dep-proj-path (-> dep :clone-dir .getAbsolutePath (str "/project.clj"))]
    (try
      (let [dep-proj (lein-project/read dep-proj-path)
            dep-deps (:dependencies dep-proj)]
        (update-in project [:dependencies] #(apply conj % dep-deps)))
      (catch Exception ex
        ;; Deliberately best-effort: a git dep without a readable
        ;; project.clj simply contributes no transitive dependencies.
        (println "Could not read git-dep's project:" dep-proj-path)
        project))))
(defn middleware
  "Called by leiningen via lein-git-deps.plugin/middleware.  Ensures
  every git dependency is cloned and checked out, then augments the
  project map with their transitive :dependencies and source paths."
  [project]
  (let [deps (git-dependencies project)]
    (doseq [dep deps]
      (setup-git-repo dep))
    (let [project-with-deps (reduce add-dependencies project deps)]
      (reduce add-source-paths project-with-deps deps))))
| null | https://raw.githubusercontent.com/tobyhede/lein-git-deps/da5d36ebe6058720b1efc3f5d55c4cca4fc47831/src/leiningen/git_deps.clj | clojure | Why, you might ask, are we using str here instead of simply def'ing
the var to a string directly? The answer is that we are working
around a bug in marginalia where it can't tell the difference
between the string that's the value for a def and a docstring. It
you can specify for 'git checkout', like a commit id
or a branch name.
all keys in the map are optional | (ns leiningen.git-deps
"How this works: It clones projects into .lein-git-deps/<whatever>.
If the directory already exists, it does a git pull and git checkout."
(:require [clojure.java.shell :as sh]
[clojure.java.io :as io]
[clojure.string :as string]
[robert.hooke :as hooke]
[leiningen.deps :as deps]
[leiningen.core.project :as lein-project]))
will hopefully be fixed RSN , because this makes us feel dirty .
(def ^{:private true
:doc "The directory into which dependencies will be cloned."}
git-deps-dir (str ".lein-git-deps"))
(defn- directory-exists?
"Return true if the specified directory exists."
[dir]
(.isDirectory (io/file dir)))
(defn- default-clone-dir
  "Given a git URL, return the directory name git would clone into by
  default: the last path segment with its final extension (usually
  \".git\") stripped.  A segment without any extension is returned
  unchanged; previously such URLs collapsed to an empty string."
  [uri]
  (let [segment (last (string/split uri #"/"))
        parts   (string/split segment #"\.")]
    (if (next parts)
      (string/join "." (butlast parts))
      segment)))
(defn- exec
"Run a command, throwing an exception if it fails, returning the
result as with clojure.java.shell/sh."
[& args]
(let [{:keys [exit out err] :as result} (apply sh/sh args)]
(if (zero? exit)
result
(throw
(Exception.
(format "Command %s failed with exit code %s\n%s\n%s"
(apply str (interpose " " args))
exit
out
err))))))
(defn- git-clone
  "Clone the git repository at url into dir-name, running git in
  working-dir."
  [url dir-name working-dir]
  ;; The previous (apply exec (remove nil? [...])) never saw a nil
  ;; element, so the indirection was dead code; call exec directly.
  (exec "git" "clone" url (str dir-name) :dir working-dir))
(defn- git-checkout
"Check out the specified commit in dir."
[commit dir]
(println "Running git checkout " commit " in " (str dir))
(exec "git" "checkout" commit :dir dir))
(defn- git-submodule-init
"Initalize submodules in the given dir"
[dir]
(println "Running git submodule init in " (str dir))
(exec "git" "submodule" "init" :dir dir))
(defn- git-submodule-update
"Update submodules in the given dir"
[dir]
(println "Running git submodule update in " (str dir))
(exec "git" "submodule" "update" :dir dir))
(defn- detached-head?
"Return true if the git repository in dir has HEAD detached."
[dir]
(let [{out :out} (exec "git" "branch" "--no-color" :dir dir)
lines (string/split-lines out)
current-branch (first (filter #(.startsWith % "*") lines))]
(when-not current-branch
(throw (Exception. "Unable to determine current branch")))
(or (= current-branch "* (no branch)") (.startsWith current-branch "* (detached"))))
(defn- git-pull
"Run 'git-pull' in directory dir, but only if we're on a branch. If
HEAD is detached, we only do a fetch, not a full pull."
[dir]
(println "Running git pull on " (str dir))
(if (detached-head? dir)
(do
(println "Not on a branch, so fetching instead of pulling.")
(exec "git" "fetch" :dir dir))
(exec "git" "pull" :dir dir)))
(defn- git-rev-parse
"Run 'git-rev-parse' and return it's output."
[rev dir]
(:out (exec "git" "rev-parse" "--revs-only" rev :dir dir)))
(defn- git-dependencies
"Return a map of the project's git dependencies."
[project]
(map (fn [dep]
(let [[dep-url commit {clone-dir-name :dir src :src}] dep
commit (or commit "master")
clone-dir-name (or clone-dir-name (default-clone-dir dep-url))
clone-dir (io/file git-deps-dir clone-dir-name)
src (or src "src")]
{:dep dep
:dep-url dep-url
:commit commit
:clone-dir-name clone-dir-name
:clone-dir clone-dir
:src src}))
(:git-dependencies project)))
(defn- needs-update?
"Checks if the required rev (aka commit) from :git-dependencies
is the same as HEAD."
[clone-dir commit]
(let [head (git-rev-parse "HEAD" clone-dir)
need (git-rev-parse commit clone-dir)]
(or (empty? head) (empty? need) (not= head need))))
(defn- checkout-repo
"Performs the actual checkout steps."
[clone-dir commit]
(git-checkout commit clone-dir)
(git-submodule-init clone-dir)
(git-submodule-update clone-dir))
(defn- setup-git-repo
"Performs all the steps to set up a single git repo.
Besides the dependency map, this function also accepts
an optional `mode` argument.
When (= mode :force-update), the repo is updated via git-pull.
If mode is not given, the repo is only updated if
needs-update? returns true."
[dep & mode]
(when-not (directory-exists? git-deps-dir)
(.mkdir (io/file git-deps-dir)))
(let [mode (first mode)
{:keys [dep-url commit clone-dir-name clone-dir]} dep]
(if (directory-exists? clone-dir)
(if (or (= mode :force-update) (needs-update? clone-dir commit))
(do
(git-pull clone-dir)
(checkout-repo clone-dir commit)))
(do
(git-clone dep-url clone-dir-name git-deps-dir)
(checkout-repo clone-dir commit)))))
(defn git-deps
"A leiningen task that will pull dependencies in via git.
Dependencies should be listed in project.clj under the
:git-dependencies key in one of these three forms:
First form : just a URL .
[\"\"]
Second form : A URL and a ref , which can be anything
[\"\"
\"329708b\"]
Third form : A URL , a commit , and a map
[\"\"
\"some-branch\"
{:dir \"alternate-directory-to-clone-to\"
:src \"alternate-src-directory-within-repo\"}]]
"
[project]
(doseq [dep (git-dependencies project)]
(let [{dep-str :dep} dep]
(println "Setting up dependency for " dep-str)
(setup-git-repo dep :force-update))))
(defn hooks
"Called by leiningen via lein-git-deps.plugin/hooks."
[]
(hooke/add-hook #'deps/deps (fn [task & args]
(apply task args)
(git-deps (first args)))))
(defn- add-source-paths
"Given a project and a dependency map (dep), adds the dependency's
soure-path to the main project"
[project dep]
(let [dep-src (-> dep :clone-dir .getAbsolutePath (str "/" (:src dep)))]
(update-in project [:source-paths] conj dep-src)))
(defn- add-dependencies
"Given a project and a dependency map (dep), adds all of the dependency's
dependencies to the main project."
[project dep]
(let [dep-proj-path (-> dep :clone-dir .getAbsolutePath (str "/project.clj"))]
(try
(let [dep-proj (lein-project/read dep-proj-path)
dep-deps (:dependencies dep-proj)]
(update-in project [:dependencies] #(apply conj % dep-deps)))
(catch Exception ex
(println "Could not read git-dep's project:" dep-proj-path)
project))))
(defn middleware
"Called by leiningen via lein-git-deps.plugin/middleware."
[project]
(let [deps (git-dependencies project)]
(doseq [dep deps] (setup-git-repo dep))
(reduce add-source-paths (reduce add-dependencies project deps) deps)))
|
ddb93c19ad5fc21c5060596007860377bb1e1a18619144cab96a9e8411acf547 | jlongster/grime3d | grime3d.scm |
;; Host (in the form accepted by open-tcp-client) of the remote grime3d
;; instance; #f until grime3d-set-host! is called.
(define grime3d-host #f)
;; Placeholder for the active connection; never assigned in this file.
(define grime3d-connection #f)

;; Remember which host grime3d-connect should dial.
(define (grime3d-set-host! host)
  (set! grime3d-host host))

;; Open a TCP connection to grime3d-host and spawn a thread that reads
;; s-expressions from it until EOF, dispatching each one to
;; grime3d-handle-message.  Uses Gambit-specific primitives
;; (open-tcp-client, make-thread, thread-start!).
(define (grime3d-connect)
  (let ((proc (open-tcp-client grime3d-host)))
    (thread-start!
     (make-thread
      (lambda ()
        (let loop ()
          (let ((msg (read proc)))
            (if (not (eof-object? msg))
                (begin
                  (grime3d-handle-message msg)
                  (loop))))))))))

;; Message dispatcher stub -- intentionally a no-op (#!void) for now.
(define (grime3d-handle-message msg)
  #!void)
| null | https://raw.githubusercontent.com/jlongster/grime3d/becacf1e40f846e3a1abf4daca3d98694df71028/lib/util/remote-debugger/grime3d.scm | scheme |
(define grime3d-host #f)
(define grime3d-connection #f)
(define (grime3d-set-host! host)
(set! grime3d-host host))
(define (grime3d-connect)
(let ((proc (open-tcp-client grime3d-host)))
(thread-start!
(make-thread
(lambda ()
(let loop ()
(let ((msg (read proc)))
(if (not (eof-object? msg))
(begin
(grime3d-handle-message msg)
(loop))))))))))
(define (grime3d-handle-message msg)
#!void)
| |
71076b85cf6258a3827b75200f4833991b404b7a5e9f77db1d111d2155ecace3 | roelvandijk/numerals | TestData.hs | |
[ @ISO639 - 1@ ] -
[ @ISO639 - 2@ ] mic
[ @ISO639 - 3@ ] mic
[ @Native name@ ]
[ @English name@ ] Mi'kmaq
[@ISO639-1@] -
[@ISO639-2@] mic
[@ISO639-3@] mic
[@Native name@] Míkmawísimk
[@English name@] Mi'kmaq
-}
module Text.Numeral.Language.MIC.TestData (cardinals) where
--------------------------------------------------------------------------------
-- Imports
--------------------------------------------------------------------------------
import "numerals" Text.Numeral.Grammar ( defaultInflection )
import "this" Text.Numeral.Test ( TestData )
--------------------------------------------------------------------------------
-- Test data
--------------------------------------------------------------------------------
{-
Sources:
-to-count-in-micmac/en/mic/
-}
cardinals :: (Num i) => TestData i
cardinals =
[ ( "default"
, defaultInflection
, [ (1, "ne’wt")
, (2, "ta’pu")
, (3, "si’st")
, (4, "ne’w")
, (5, "na’n")
, (6, "asukom")
, (7, "l’uiknek")
, (8, "ukmuljin")
, (9, "pesqunatek")
, (10, "newtiska’q")
, (11, "newtiska’q jel ne’wt")
, (12, "newtiska’q jel ta’pu")
, (13, "newtiska’q jel si’st")
, (14, "newtiska’q jel ne’w")
, (15, "newtiska’q jel na’n")
, (16, "newtiska’q jel asukom")
, (17, "newtiska’q jel l’uiknek")
, (18, "newtiska’q jel ukmuljin")
, (19, "newtiska’q jel pesqunatek")
, (20, "tapuiska’q")
, (21, "tapuiska’q jel ne’wt")
, (22, "tapuiska’q jel ta’pu")
, (23, "tapuiska’q jel si’st")
, (24, "tapuiska’q jel ne’w")
, (25, "tapuiska’q jel na’n")
, (26, "tapuiska’q jel asukom")
, (27, "tapuiska’q jel l’uiknek")
, (28, "tapuiska’q jel ukmuljin")
, (29, "tapuiska’q jel pesqunatek")
, (30, "nesiska’q")
, (31, "nesiska’q jel ne’wt")
, (32, "nesiska’q jel ta’pu")
, (33, "nesiska’q jel si’st")
, (34, "nesiska’q jel ne’w")
, (35, "nesiska’q jel na’n")
, (36, "nesiska’q jel asukom")
, (37, "nesiska’q jel l’uiknek")
, (38, "nesiska’q jel ukmuljin")
, (39, "nesiska’q jel pesqunatek")
, (40, "newiska’q")
, (41, "newiska’q jel ne’wt")
, (42, "newiska’q jel ta’pu")
, (43, "newiska’q jel si’st")
, (44, "newiska’q jel ne’w")
, (45, "newiska’q jel na’n")
, (46, "newiska’q jel asukom")
, (47, "newiska’q jel l’uiknek")
, (48, "newiska’q jel ukmuljin")
, (49, "newiska’q jel pesqunatek")
, (50, "naniska’q")
, (51, "naniska’q jel ne’wt")
, (52, "naniska’q jel ta’pu")
, (53, "naniska’q jel si’st")
, (54, "naniska’q jel ne’w")
, (55, "naniska’q jel na’n")
, (56, "naniska’q jel asukom")
, (57, "naniska’q jel l’uiknek")
, (58, "naniska’q jel ukmuljin")
, (59, "naniska’q jel pesqunatek")
, (60, "asukom te’siska’q")
, (61, "asukom te’siska’q jel ne’wt")
, (62, "asukom te’siska’q jel ta’pu")
, (63, "asukom te’siska’q jel si’st")
, (64, "asukom te’siska’q jel ne’w")
, (65, "asukom te’siska’q jel na’n")
, (66, "asukom te’siska’q jel asukom")
, (67, "asukom te’siska’q jel l’uiknek")
, (68, "asukom te’siska’q jel ukmuljin")
, (69, "asukom te’siska’q jel pesqunatek")
, (70, "l’uiknek te’siska’q")
, (71, "l’uiknek te’siska’q jel ne’wt")
, (72, "l’uiknek te’siska’q jel ta’pu")
, (73, "l’uiknek te’siska’q jel si’st")
, (74, "l’uiknek te’siska’q jel ne’w")
, (75, "l’uiknek te’siska’q jel na’n")
, (76, "l’uiknek te’siska’q jel asukom")
, (77, "l’uiknek te’siska’q jel l’uiknek")
, (78, "l’uiknek te’siska’q jel ukmuljin")
, (79, "l’uiknek te’siska’q jel pesqunatek")
, (80, "ukmuljin te’siska’q")
, (81, "ukmuljin te’siska’q jel ne’wt")
, (82, "ukmuljin te’siska’q jel ta’pu")
, (83, "ukmuljin te’siska’q jel si’st")
, (84, "ukmuljin te’siska’q jel ne’w")
, (85, "ukmuljin te’siska’q jel na’n")
, (86, "ukmuljin te’siska’q jel asukom")
, (87, "ukmuljin te’siska’q jel l’uiknek")
, (88, "ukmuljin te’siska’q jel ukmuljin")
, (89, "ukmuljin te’siska’q jel pesqunatek")
, (90, "pesqunatek te’siska’q")
, (91, "pesqunatek te’siska’q jel ne’wt")
, (92, "pesqunatek te’siska’q jel ta’pu")
, (93, "pesqunatek te’siska’q jel si’st")
, (94, "pesqunatek te’siska’q jel ne’w")
, (95, "pesqunatek te’siska’q jel na’n")
, (96, "pesqunatek te’siska’q jel asukom")
, (97, "pesqunatek te’siska’q jel l’uiknek")
, (98, "pesqunatek te’siska’q jel ukmuljin")
, (99, "pesqunatek te’siska’q jel pesqunatek")
, (100, "kaskimtlnaqn")
, (101, "kaskimtlnaqn te’siska’q jel ne’wt")
, (102, "kaskimtlnaqn te’siska’q jel ta’pu")
, (103, "kaskimtlnaqn te’siska’q jel si’st")
, (104, "kaskimtlnaqn te’siska’q jel ne’w")
, (105, "kaskimtlnaqn te’siska’q jel na’n")
, (106, "kaskimtlnaqn te’siska’q jel asukom")
, (107, "kaskimtlnaqn te’siska’q jel l’uiknek")
, (108, "kaskimtlnaqn te’siska’q jel ukmuljin")
, (109, "kaskimtlnaqn te’siska’q jel pesqunatek")
, (110, "kaskimtlnaqn te’siska’q jel newtiska’q")
, (123, "kaskimtlnaqn te’siska’q jel tapuiska’q jel si’st")
, (200, "ta’pu kaskimtlnaqn")
, (300, "si’st kaskimtlnaqn")
, (321, "si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (400, "ne’w kaskimtlnaqn")
, (500, "na’n kaskimtlnaqn")
, (600, "asukom kaskimtlnaqn")
, (700, "l’uiknek kaskimtlnaqn")
, (800, "ukmuljin kaskimtlnaqn")
, (900, "pesqunatek kaskimtlnaqn")
, (909, "pesqunatek kaskimtlnaqn te’siska’q jel pesqunatek")
, (990, "pesqunatek kaskimtlnaqn te’siska’q jel pesqunatek te’siska’q")
, (999, "pesqunatek kaskimtlnaqn te’siska’q jel pesqunatek te’siska’q jel pesqunatek")
, (1000, "pituimtlnaqn")
, (1001, "pituimtlnaqn te’siska’q jel ne’wt")
, (1008, "pituimtlnaqn te’siska’q jel ukmuljin")
, (1234, "pituimtlnaqn te’siska’q jel ta’pu kaskimtlnaqn te’siska’q jel nesiska’q jel ne’w")
, (2000, "ta’pu pituimtlnaqn")
, (3000, "si’st pituimtlnaqn")
, (4000, "ne’w pituimtlnaqn")
, (4321, "ne’w pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (5000, "na’n pituimtlnaqn")
, (6000, "asukom pituimtlnaqn")
, (7000, "l’uiknek pituimtlnaqn")
, (8000, "ukmuljin pituimtlnaqn")
, (9000, "pesqunatek pituimtlnaqn")
, (10000, "pituimtlnaqnepikatun")
, (12345, "newtiska’q jel ta’pu pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel newiska’q jel na’n")
, (20000, "tapuiska’q pituimtlnaqn")
, (30000, "nesiska’q pituimtlnaqn")
, (40000, "newiska’q pituimtlnaqn")
, (50000, "naniska’q pituimtlnaqn")
, (54321, "naniska’q jel ne’w pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (60000, "asukom te’siska’q pituimtlnaqn")
, (70000, "l’uiknek te’siska’q pituimtlnaqn")
, (80000, "ukmuljin te’siska’q pituimtlnaqn")
, (90000, "pesqunatek te’siska’q pituimtlnaqn")
, (100000, "kaskimtlnaqn pituimtlnaqn")
, (123456, "kaskimtlnaqn te’siska’q jel tapuiska’q jel si’st pituimtlnaqn te’siska’q jel ne’w kaskimtlnaqn te’siska’q jel naniska’q jel asukom")
, (200000, "ta’pu kaskimtlnaqn pituimtlnaqn")
, (300000, "si’st kaskimtlnaqn pituimtlnaqn")
, (400000, "ne’w kaskimtlnaqn pituimtlnaqn")
, (500000, "na’n kaskimtlnaqn pituimtlnaqn")
, (600000, "asukom kaskimtlnaqn pituimtlnaqn")
, (654321, "asukom kaskimtlnaqn te’siska’q jel naniska’q jel ne’w pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (700000, "l’uiknek kaskimtlnaqn pituimtlnaqn")
, (800000, "ukmuljin kaskimtlnaqn pituimtlnaqn")
, (900000, "pesqunatek kaskimtlnaqn pituimtlnaqn")
, (1000000, "kji-pituimtlnaqn")
]
)
]
| null | https://raw.githubusercontent.com/roelvandijk/numerals/b1e4121e0824ac0646a3230bd311818e159ec127/src-test/Text/Numeral/Language/MIC/TestData.hs | haskell | ------------------------------------------------------------------------------
Imports
------------------------------------------------------------------------------
------------------------------------------------------------------------------
Test data
------------------------------------------------------------------------------
Sources:
-to-count-in-micmac/en/mic/
| |
[ @ISO639 - 1@ ] -
[ @ISO639 - 2@ ] mic
[ @ISO639 - 3@ ] mic
[ @Native name@ ]
[ @English name@ ] Mi'kmaq
[@ISO639-1@] -
[@ISO639-2@] mic
[@ISO639-3@] mic
[@Native name@] Míkmawísimk
[@English name@] Mi'kmaq
-}
module Text.Numeral.Language.MIC.TestData (cardinals) where
import "numerals" Text.Numeral.Grammar ( defaultInflection )
import "this" Text.Numeral.Test ( TestData )
cardinals :: (Num i) => TestData i
cardinals =
[ ( "default"
, defaultInflection
, [ (1, "ne’wt")
, (2, "ta’pu")
, (3, "si’st")
, (4, "ne’w")
, (5, "na’n")
, (6, "asukom")
, (7, "l’uiknek")
, (8, "ukmuljin")
, (9, "pesqunatek")
, (10, "newtiska’q")
, (11, "newtiska’q jel ne’wt")
, (12, "newtiska’q jel ta’pu")
, (13, "newtiska’q jel si’st")
, (14, "newtiska’q jel ne’w")
, (15, "newtiska’q jel na’n")
, (16, "newtiska’q jel asukom")
, (17, "newtiska’q jel l’uiknek")
, (18, "newtiska’q jel ukmuljin")
, (19, "newtiska’q jel pesqunatek")
, (20, "tapuiska’q")
, (21, "tapuiska’q jel ne’wt")
, (22, "tapuiska’q jel ta’pu")
, (23, "tapuiska’q jel si’st")
, (24, "tapuiska’q jel ne’w")
, (25, "tapuiska’q jel na’n")
, (26, "tapuiska’q jel asukom")
, (27, "tapuiska’q jel l’uiknek")
, (28, "tapuiska’q jel ukmuljin")
, (29, "tapuiska’q jel pesqunatek")
, (30, "nesiska’q")
, (31, "nesiska’q jel ne’wt")
, (32, "nesiska’q jel ta’pu")
, (33, "nesiska’q jel si’st")
, (34, "nesiska’q jel ne’w")
, (35, "nesiska’q jel na’n")
, (36, "nesiska’q jel asukom")
, (37, "nesiska’q jel l’uiknek")
, (38, "nesiska’q jel ukmuljin")
, (39, "nesiska’q jel pesqunatek")
, (40, "newiska’q")
, (41, "newiska’q jel ne’wt")
, (42, "newiska’q jel ta’pu")
, (43, "newiska’q jel si’st")
, (44, "newiska’q jel ne’w")
, (45, "newiska’q jel na’n")
, (46, "newiska’q jel asukom")
, (47, "newiska’q jel l’uiknek")
, (48, "newiska’q jel ukmuljin")
, (49, "newiska’q jel pesqunatek")
, (50, "naniska’q")
, (51, "naniska’q jel ne’wt")
, (52, "naniska’q jel ta’pu")
, (53, "naniska’q jel si’st")
, (54, "naniska’q jel ne’w")
, (55, "naniska’q jel na’n")
, (56, "naniska’q jel asukom")
, (57, "naniska’q jel l’uiknek")
, (58, "naniska’q jel ukmuljin")
, (59, "naniska’q jel pesqunatek")
, (60, "asukom te’siska’q")
, (61, "asukom te’siska’q jel ne’wt")
, (62, "asukom te’siska’q jel ta’pu")
, (63, "asukom te’siska’q jel si’st")
, (64, "asukom te’siska’q jel ne’w")
, (65, "asukom te’siska’q jel na’n")
, (66, "asukom te’siska’q jel asukom")
, (67, "asukom te’siska’q jel l’uiknek")
, (68, "asukom te’siska’q jel ukmuljin")
, (69, "asukom te’siska’q jel pesqunatek")
, (70, "l’uiknek te’siska’q")
, (71, "l’uiknek te’siska’q jel ne’wt")
, (72, "l’uiknek te’siska’q jel ta’pu")
, (73, "l’uiknek te’siska’q jel si’st")
, (74, "l’uiknek te’siska’q jel ne’w")
, (75, "l’uiknek te’siska’q jel na’n")
, (76, "l’uiknek te’siska’q jel asukom")
, (77, "l’uiknek te’siska’q jel l’uiknek")
, (78, "l’uiknek te’siska’q jel ukmuljin")
, (79, "l’uiknek te’siska’q jel pesqunatek")
, (80, "ukmuljin te’siska’q")
, (81, "ukmuljin te’siska’q jel ne’wt")
, (82, "ukmuljin te’siska’q jel ta’pu")
, (83, "ukmuljin te’siska’q jel si’st")
, (84, "ukmuljin te’siska’q jel ne’w")
, (85, "ukmuljin te’siska’q jel na’n")
, (86, "ukmuljin te’siska’q jel asukom")
, (87, "ukmuljin te’siska’q jel l’uiknek")
, (88, "ukmuljin te’siska’q jel ukmuljin")
, (89, "ukmuljin te’siska’q jel pesqunatek")
, (90, "pesqunatek te’siska’q")
, (91, "pesqunatek te’siska’q jel ne’wt")
, (92, "pesqunatek te’siska’q jel ta’pu")
, (93, "pesqunatek te’siska’q jel si’st")
, (94, "pesqunatek te’siska’q jel ne’w")
, (95, "pesqunatek te’siska’q jel na’n")
, (96, "pesqunatek te’siska’q jel asukom")
, (97, "pesqunatek te’siska’q jel l’uiknek")
, (98, "pesqunatek te’siska’q jel ukmuljin")
, (99, "pesqunatek te’siska’q jel pesqunatek")
, (100, "kaskimtlnaqn")
, (101, "kaskimtlnaqn te’siska’q jel ne’wt")
, (102, "kaskimtlnaqn te’siska’q jel ta’pu")
, (103, "kaskimtlnaqn te’siska’q jel si’st")
, (104, "kaskimtlnaqn te’siska’q jel ne’w")
, (105, "kaskimtlnaqn te’siska’q jel na’n")
, (106, "kaskimtlnaqn te’siska’q jel asukom")
, (107, "kaskimtlnaqn te’siska’q jel l’uiknek")
, (108, "kaskimtlnaqn te’siska’q jel ukmuljin")
, (109, "kaskimtlnaqn te’siska’q jel pesqunatek")
, (110, "kaskimtlnaqn te’siska’q jel newtiska’q")
, (123, "kaskimtlnaqn te’siska’q jel tapuiska’q jel si’st")
, (200, "ta’pu kaskimtlnaqn")
, (300, "si’st kaskimtlnaqn")
, (321, "si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (400, "ne’w kaskimtlnaqn")
, (500, "na’n kaskimtlnaqn")
, (600, "asukom kaskimtlnaqn")
, (700, "l’uiknek kaskimtlnaqn")
, (800, "ukmuljin kaskimtlnaqn")
, (900, "pesqunatek kaskimtlnaqn")
, (909, "pesqunatek kaskimtlnaqn te’siska’q jel pesqunatek")
, (990, "pesqunatek kaskimtlnaqn te’siska’q jel pesqunatek te’siska’q")
, (999, "pesqunatek kaskimtlnaqn te’siska’q jel pesqunatek te’siska’q jel pesqunatek")
, (1000, "pituimtlnaqn")
, (1001, "pituimtlnaqn te’siska’q jel ne’wt")
, (1008, "pituimtlnaqn te’siska’q jel ukmuljin")
, (1234, "pituimtlnaqn te’siska’q jel ta’pu kaskimtlnaqn te’siska’q jel nesiska’q jel ne’w")
, (2000, "ta’pu pituimtlnaqn")
, (3000, "si’st pituimtlnaqn")
, (4000, "ne’w pituimtlnaqn")
, (4321, "ne’w pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (5000, "na’n pituimtlnaqn")
, (6000, "asukom pituimtlnaqn")
, (7000, "l’uiknek pituimtlnaqn")
, (8000, "ukmuljin pituimtlnaqn")
, (9000, "pesqunatek pituimtlnaqn")
, (10000, "pituimtlnaqnepikatun")
, (12345, "newtiska’q jel ta’pu pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel newiska’q jel na’n")
, (20000, "tapuiska’q pituimtlnaqn")
, (30000, "nesiska’q pituimtlnaqn")
, (40000, "newiska’q pituimtlnaqn")
, (50000, "naniska’q pituimtlnaqn")
, (54321, "naniska’q jel ne’w pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (60000, "asukom te’siska’q pituimtlnaqn")
, (70000, "l’uiknek te’siska’q pituimtlnaqn")
, (80000, "ukmuljin te’siska’q pituimtlnaqn")
, (90000, "pesqunatek te’siska’q pituimtlnaqn")
, (100000, "kaskimtlnaqn pituimtlnaqn")
, (123456, "kaskimtlnaqn te’siska’q jel tapuiska’q jel si’st pituimtlnaqn te’siska’q jel ne’w kaskimtlnaqn te’siska’q jel naniska’q jel asukom")
, (200000, "ta’pu kaskimtlnaqn pituimtlnaqn")
, (300000, "si’st kaskimtlnaqn pituimtlnaqn")
, (400000, "ne’w kaskimtlnaqn pituimtlnaqn")
, (500000, "na’n kaskimtlnaqn pituimtlnaqn")
, (600000, "asukom kaskimtlnaqn pituimtlnaqn")
, (654321, "asukom kaskimtlnaqn te’siska’q jel naniska’q jel ne’w pituimtlnaqn te’siska’q jel si’st kaskimtlnaqn te’siska’q jel tapuiska’q jel ne’wt")
, (700000, "l’uiknek kaskimtlnaqn pituimtlnaqn")
, (800000, "ukmuljin kaskimtlnaqn pituimtlnaqn")
, (900000, "pesqunatek kaskimtlnaqn pituimtlnaqn")
, (1000000, "kji-pituimtlnaqn")
]
)
]
|
0a66424887bf6a91e6596ab5f717b890d3080edd9f6affeb8198b6142df5e78b | tud-fop/vanda-haskell | PBSM.hs | -----------------------------------------------------------------------------
-- |
-- Module : VandaCLI.PBSM
Copyright : ( c ) Technische Universität Dresden 2014
-- License : BSD-style
--
-- Maintainer :
-- Stability : unknown
-- Portability : portable
-----------------------------------------------------------------------------
{-# LANGUAGE DeriveDataTypeable, RecordWildCards #-}
# OPTIONS_GHC -fno - warn - missing - fields #
module VandaCLI.PBSM where
import Vanda.Algorithms.Earley.WSA as WSA (fromList)
import Vanda.Algorithms.Earley
import Vanda.Algorithms.ExpectationMaximizationAcyclic
import Vanda.Algorithms.InsideOutsideWeightsAcyclic
import Vanda.Corpus.Penn.Text
import qualified Vanda.Features as F
import Vanda.Hypergraph
import Vanda.PBSM.PatternBasedStateMerging
import Vanda.PBSM.Types
import Vanda.Util.Memorysavers
import Vanda.Util.IO
import qualified Vanda.Util.Tree as T
import Control.Applicative ((<$>))
import Control.DeepSeq
import Control.Monad
import Data.Binary (decodeFile, encodeFile)
import Data.List
import qualified Data.Map.Lazy as M
import qualified Data.Text.Lazy.IO as Text
import qualified Data.Tree as T
import Data.Tuple
import qualified Data.Vector as V
import System.Console.CmdArgs
import System.Directory
import System.FilePath ((</>), (<.>))
import System.Posix.Signals (sigUSR1)
import Debug.Trace
data Args
= PrepareTreebank
{ argWorktree :: FilePath
, argTreebank :: FilePath
, argDefoliate :: Bool
}
| PBSM
{ argWorktree :: FilePath
-- , argTreebank1 :: FilePath
-- , argTreebank2 :: FilePath
, : :
}
| EM
{ argWorktree :: FilePath
}
| Parse
{ argWorktree :: FilePath
}
deriving (Data, Show, Typeable)
argModes :: [Args]
argModes
= [ PrepareTreebank
{ argWorktree = "" &= explicit &= name "work-tree" &= typDir
&= help "work in DIR instead of current directory"
, argTreebank = def &= argPos 0 &= typ "TREEBANK"
, argDefoliate = False &= explicit &= name "defoliate"
&= help "remove leafs from treebank trees, e.g. to just deal \
\with preterminals"
} &= details
[ ""
]
, PBSM
{ -- argTreebank1 = def &= argPos 0 &= typ "TREEBANK1"
, argTreebank2 = def & = argPos 1 & = typ " TREEBANK2 "
, argDefoliate = False & = explicit & = name " defoliate "
-- &= help "remove leafs from treebank trees, e.g. to just deal \
-- \with preterminals"
} &= details
[ "Read-off a grammar from TREEBANK1 and generalize it using TREEBANK2."
]
, EM {} &= details
[ "Add weights to the pbsm result using the EM algorithm."
]
, Parse {} &= details
[ "Parse newline-separated sentences from standard input."
]
]
&= helpArg [name "h"]
&= versionArg [ignore]
&= program "command"
&= summary "Pattern-Based State Merging"
type FileGrammar = RTG Int String
type FileTreebank = [T.Tree String]
type FileWeights = M.Map Int Double
initialNT :: Int ; initialNT = 0
initialT :: String; initialT = "ROOT"
main :: IO ()
main = do
arguments <- cmdArgs (modes argModes)
print arguments
createDirectoryIfMissing True (argWorktree arguments)
let fileTreebank1 = argWorktree arguments </> "treebank1.bin"
fileTreebank2 = argWorktree arguments </> "treebank2.bin"
fileGrammar = argWorktree arguments </> "grammar.bin"
fileWeights = argWorktree arguments </> "weights.bin"
let loadCorpus defol c
= map (if defol then T.defoliate else id)
. parsePenn
<$> Text.readFile c
case arguments of
PrepareTreebank { .. } -> do
(c1, c2, m) <- partitionTreebank <$> loadCorpus argDefoliate argTreebank
encodeFile fileTreebank1 (c1 :: FileTreebank)
encodeFile fileTreebank2 (c2 :: FileTreebank)
putStr
$ unlines
$ map (\ (n, x) -> show n ++ "\t" ++ show x)
$ sort
$ map swap
$ M.toList m
PBSM { .. } -> do
[c1, c2] <- unify3 <$> mapM decodeFile [fileTreebank1, fileTreebank2]
:: IO [FileTreebank]
let g = generalize
head
(fst $ intifyNonterminals $ forestToGrammar c1)
(traceForestTreesOnEvaluation c2)
if null fileGrammar
then print g
else encodeFile fileGrammar (g :: FileGrammar)
EM { .. } -> do -- from now on everything is start-separated
g <- decodeFile fileGrammar :: IO FileGrammar
c <- force . unify2 . concat
<$> mapM decodeFile [fileTreebank1, fileTreebank2]
:: IO FileTreebank
(concat <$> mapM decodeFile [fileTreebank1, fileTreebank2] :: IO FileTreebank)
>>= (\ l -> putStrLn $ "Corpus contains " ++ show l ++ " trees.") . length
let hg :: EdgeList Int String Int
hg = toHypergraphStartSeparated initialNT initialT g
part = M.elems $ M.fromListWith (++) $ map (\ e -> (to e, [ident e])) $ edges hg
hgs = map (dropUnreach [initialNT] . parseTree hg . startSepTree) c
where startSepTree tree = T.Node initialT [tree]
w0 = M.singleton initialNT (1 :: Double)
ws = M.fromList $ flip zip (repeat 1) $ map ident $ edges hg
let worker update =
forM_ (zip [0 :: Int ..] $ take 5 $ forestEMlist part (zip hgs (repeat 1)) ident w0 ws)
$ \ (i, (lklhood, (_, ws'))) -> do
putStrLn $ "EM step " ++ show i ++ "; Likelihood: " ++ show lklhood
update (i, ws')
handler (i, ws') = do
putStrLn $ "Saving result of EM step " ++ show i ++ " ..."
encodeFile (fileWeights <.> "tmp") (ws' :: FileWeights)
renameFile (fileWeights <.> "tmp") fileWeights
putStrLn $ " ... result of EM step " ++ show i ++ " saved."
handleOnDemand Nothing Nothing [sigUSR1] worker {-handler-}(\ _ -> return ())
Parse { .. } -> do
g <- decodeFile fileGrammar :: IO FileGrammar
let hg :: BackwardStar Int (String, Int) Int
hg = toHypergraphStartSeparatedRanked initialNT initialT g
comp :: (t, Int) -> [Either Int t]
comp (t, k) = if k == 0 then [Right t] else map Left [0 .. k - 1]
weightM <- decodeFile fileWeights :: IO FileWeights
let lkup i = M.findWithDefault err i weightM
err = errorModule "main.lkup: vertex not found"
let feature = F.Feature (\ _ (i, _) xs -> lkup i * product xs) V.singleton
sents <- map words . lines <$> getContents
forM_ sents $ \ sent -> do
let (ntM, hg', weights') = earley hg comp (WSA.fromList 1 sent) initialNT
putStrLn $ unlines $ map show $ edges hg'
print weights'
let bs = bests hg' feature (V.singleton 1)
case M.lookup (0, initialNT, length sent) ntM of
Nothing -> putStrLn "No parse."
Just nt ->
putStr
$ unlines
$ map (drawTree . fmap (fst . label) . F.deriv)
$ take 10
$ bs M.! nt
partitionTreebank
:: Ord a => [T.Tree a] -> ([T.Tree a], [T.Tree a], M.Map (a, Int) Int)
partitionTreebank = go M.empty
where
go labelM [] = ([], [], labelM)
go labelM (t : ts)
= if all (\ k -> M.findWithDefault 0 k labelM > 0) labels
then (ts1, t : ts2, labelM')
else (t : ts1, ts2, labelM')
where
labels = T.flattenRanked t
(ts1, ts2, labelM')
= go (foldl' (\ m k -> M.insertWith (+) k 1 m) labelM labels) ts
traceForestTreesOnEvaluation :: [T.Tree String] -> [T.Tree String]
traceForestTreesOnEvaluation
= zipWith zipper [1 :: Int ..]
where
zipper i t = trace msg t
where msg = "<Tree " ++ show i ++ ">\n"
++ drawTree t
++ unwords (T.yield t)
++ "\n</Tree " ++ show i ++ ">"
drawTree :: T.Tree String -> String
drawTree = T.drawTree' (T.drawstyleCompact1 "─╴") . T.mapLeafs colorVividYellow
where colorVividYellow :: String -> String
colorVividYellow cs = "\ESC[93m" ++ cs ++ "\ESC[m"
errorModule :: String -> a
errorModule = error . ("Vanda.PBSM.Main." ++)
| null | https://raw.githubusercontent.com/tud-fop/vanda-haskell/3214966361b6dbf178155950c94423eee7f9453e/executable/VandaCLI/PBSM.hs | haskell | ---------------------------------------------------------------------------
|
Module : VandaCLI.PBSM
License : BSD-style
Maintainer :
Stability : unknown
Portability : portable
---------------------------------------------------------------------------
# LANGUAGE DeriveDataTypeable, RecordWildCards #
, argTreebank1 :: FilePath
, argTreebank2 :: FilePath
argTreebank1 = def &= argPos 0 &= typ "TREEBANK1"
&= help "remove leafs from treebank trees, e.g. to just deal \
\with preterminals"
from now on everything is start-separated
handler | Copyright : ( c ) Technische Universität Dresden 2014
# OPTIONS_GHC -fno - warn - missing - fields #
module VandaCLI.PBSM where
import Vanda.Algorithms.Earley.WSA as WSA (fromList)
import Vanda.Algorithms.Earley
import Vanda.Algorithms.ExpectationMaximizationAcyclic
import Vanda.Algorithms.InsideOutsideWeightsAcyclic
import Vanda.Corpus.Penn.Text
import qualified Vanda.Features as F
import Vanda.Hypergraph
import Vanda.PBSM.PatternBasedStateMerging
import Vanda.PBSM.Types
import Vanda.Util.Memorysavers
import Vanda.Util.IO
import qualified Vanda.Util.Tree as T
import Control.Applicative ((<$>))
import Control.DeepSeq
import Control.Monad
import Data.Binary (decodeFile, encodeFile)
import Data.List
import qualified Data.Map.Lazy as M
import qualified Data.Text.Lazy.IO as Text
import qualified Data.Tree as T
import Data.Tuple
import qualified Data.Vector as V
import System.Console.CmdArgs
import System.Directory
import System.FilePath ((</>), (<.>))
import System.Posix.Signals (sigUSR1)
import Debug.Trace
data Args
= PrepareTreebank
{ argWorktree :: FilePath
, argTreebank :: FilePath
, argDefoliate :: Bool
}
| PBSM
{ argWorktree :: FilePath
, : :
}
| EM
{ argWorktree :: FilePath
}
| Parse
{ argWorktree :: FilePath
}
deriving (Data, Show, Typeable)
argModes :: [Args]
argModes
= [ PrepareTreebank
{ argWorktree = "" &= explicit &= name "work-tree" &= typDir
&= help "work in DIR instead of current directory"
, argTreebank = def &= argPos 0 &= typ "TREEBANK"
, argDefoliate = False &= explicit &= name "defoliate"
&= help "remove leafs from treebank trees, e.g. to just deal \
\with preterminals"
} &= details
[ ""
]
, PBSM
, argTreebank2 = def & = argPos 1 & = typ " TREEBANK2 "
, argDefoliate = False & = explicit & = name " defoliate "
} &= details
[ "Read-off a grammar from TREEBANK1 and generalize it using TREEBANK2."
]
, EM {} &= details
[ "Add weights to the pbsm result using the EM algorithm."
]
, Parse {} &= details
[ "Parse newline-separated sentences from standard input."
]
]
&= helpArg [name "h"]
&= versionArg [ignore]
&= program "command"
&= summary "Pattern-Based State Merging"
type FileGrammar = RTG Int String
type FileTreebank = [T.Tree String]
type FileWeights = M.Map Int Double
initialNT :: Int ; initialNT = 0
initialT :: String; initialT = "ROOT"
main :: IO ()
main = do
arguments <- cmdArgs (modes argModes)
print arguments
createDirectoryIfMissing True (argWorktree arguments)
let fileTreebank1 = argWorktree arguments </> "treebank1.bin"
fileTreebank2 = argWorktree arguments </> "treebank2.bin"
fileGrammar = argWorktree arguments </> "grammar.bin"
fileWeights = argWorktree arguments </> "weights.bin"
let loadCorpus defol c
= map (if defol then T.defoliate else id)
. parsePenn
<$> Text.readFile c
case arguments of
PrepareTreebank { .. } -> do
(c1, c2, m) <- partitionTreebank <$> loadCorpus argDefoliate argTreebank
encodeFile fileTreebank1 (c1 :: FileTreebank)
encodeFile fileTreebank2 (c2 :: FileTreebank)
putStr
$ unlines
$ map (\ (n, x) -> show n ++ "\t" ++ show x)
$ sort
$ map swap
$ M.toList m
PBSM { .. } -> do
[c1, c2] <- unify3 <$> mapM decodeFile [fileTreebank1, fileTreebank2]
:: IO [FileTreebank]
let g = generalize
head
(fst $ intifyNonterminals $ forestToGrammar c1)
(traceForestTreesOnEvaluation c2)
if null fileGrammar
then print g
else encodeFile fileGrammar (g :: FileGrammar)
g <- decodeFile fileGrammar :: IO FileGrammar
c <- force . unify2 . concat
<$> mapM decodeFile [fileTreebank1, fileTreebank2]
:: IO FileTreebank
(concat <$> mapM decodeFile [fileTreebank1, fileTreebank2] :: IO FileTreebank)
>>= (\ l -> putStrLn $ "Corpus contains " ++ show l ++ " trees.") . length
let hg :: EdgeList Int String Int
hg = toHypergraphStartSeparated initialNT initialT g
part = M.elems $ M.fromListWith (++) $ map (\ e -> (to e, [ident e])) $ edges hg
hgs = map (dropUnreach [initialNT] . parseTree hg . startSepTree) c
where startSepTree tree = T.Node initialT [tree]
w0 = M.singleton initialNT (1 :: Double)
ws = M.fromList $ flip zip (repeat 1) $ map ident $ edges hg
let worker update =
forM_ (zip [0 :: Int ..] $ take 5 $ forestEMlist part (zip hgs (repeat 1)) ident w0 ws)
$ \ (i, (lklhood, (_, ws'))) -> do
putStrLn $ "EM step " ++ show i ++ "; Likelihood: " ++ show lklhood
update (i, ws')
handler (i, ws') = do
putStrLn $ "Saving result of EM step " ++ show i ++ " ..."
encodeFile (fileWeights <.> "tmp") (ws' :: FileWeights)
renameFile (fileWeights <.> "tmp") fileWeights
putStrLn $ " ... result of EM step " ++ show i ++ " saved."
Parse { .. } -> do
g <- decodeFile fileGrammar :: IO FileGrammar
let hg :: BackwardStar Int (String, Int) Int
hg = toHypergraphStartSeparatedRanked initialNT initialT g
comp :: (t, Int) -> [Either Int t]
comp (t, k) = if k == 0 then [Right t] else map Left [0 .. k - 1]
weightM <- decodeFile fileWeights :: IO FileWeights
let lkup i = M.findWithDefault err i weightM
err = errorModule "main.lkup: vertex not found"
let feature = F.Feature (\ _ (i, _) xs -> lkup i * product xs) V.singleton
sents <- map words . lines <$> getContents
forM_ sents $ \ sent -> do
let (ntM, hg', weights') = earley hg comp (WSA.fromList 1 sent) initialNT
putStrLn $ unlines $ map show $ edges hg'
print weights'
let bs = bests hg' feature (V.singleton 1)
case M.lookup (0, initialNT, length sent) ntM of
Nothing -> putStrLn "No parse."
Just nt ->
putStr
$ unlines
$ map (drawTree . fmap (fst . label) . F.deriv)
$ take 10
$ bs M.! nt
partitionTreebank
:: Ord a => [T.Tree a] -> ([T.Tree a], [T.Tree a], M.Map (a, Int) Int)
partitionTreebank = go M.empty
where
go labelM [] = ([], [], labelM)
go labelM (t : ts)
= if all (\ k -> M.findWithDefault 0 k labelM > 0) labels
then (ts1, t : ts2, labelM')
else (t : ts1, ts2, labelM')
where
labels = T.flattenRanked t
(ts1, ts2, labelM')
= go (foldl' (\ m k -> M.insertWith (+) k 1 m) labelM labels) ts
traceForestTreesOnEvaluation :: [T.Tree String] -> [T.Tree String]
traceForestTreesOnEvaluation
= zipWith zipper [1 :: Int ..]
where
zipper i t = trace msg t
where msg = "<Tree " ++ show i ++ ">\n"
++ drawTree t
++ unwords (T.yield t)
++ "\n</Tree " ++ show i ++ ">"
drawTree :: T.Tree String -> String
drawTree = T.drawTree' (T.drawstyleCompact1 "─╴") . T.mapLeafs colorVividYellow
where colorVividYellow :: String -> String
colorVividYellow cs = "\ESC[93m" ++ cs ++ "\ESC[m"
errorModule :: String -> a
errorModule = error . ("Vanda.PBSM.Main." ++)
|
e0f474601291e814f30220313a49d0ce1572128809addcd74a5bdf80659fb794 | dlowe-net/orcabot | slogan-grammar.lisp | (sentence -> (or ("You get better inside" thing ".")
("Stay cool with" thing ".")
("For the love of" thing ".")
(thing "for a professional image.")
(thing ", stay in touch.")
(thing "'s got it all!")
("I quit smoking with" thing ".")
("Everyone loves" thing ".")
("High life with" thing ".")
("I believe in" thing ".")
(thing "is a never ending story.")
("Don't get in the way of" thing ".")
("The goddess made" thing ".")
(thing "is what the world was waiting for.")
(thing "after a long day.")
("One goal, one passion -" thing ".")
("Enjoy" thing ".")
(thing "- simplified!")
("Everything is simple with" thing ".")
(thing "for you!")
("I wish I had a" thing ".")
("Think different, think" thing ".")
("The president buys" thing ".")
("Make the world a better place with" thing ".")
(thing "evolution.")
("Feel it -" thing "!")
(thing "... whatever you want.")
("The Power of" thing ".")
(thing "innovate your world")
("My" thing "beats everything.")
("Let's" thing "!")
("The" thing "spirit.")
("I trust" thing ".")
(thing ", one for all.")
("Be young, have fun, taste" thing)
(thing ", created by nature.")
(thing "for the masses.")
(thing ", good.")
(thing "wanted.")
("No" thing ", no kiss.")
("The ideal" thing)
(thing ", pure lust.")
("I lost weight with" thing)
("Connect with" thing)
("The" thing "community.")
("Way to go," thing "!")
(thing "Dreamteam.")
("Go far with" thing)
("Live" thing)
("Don't worry," thing "takes care.")
("It's my" thing "!")
(thing "for your kids!")
("Do you know" thing "?")
("Go farther with" thing)
(thing "- your game.")
(thing "for a professional image.")))
| null | https://raw.githubusercontent.com/dlowe-net/orcabot/bf3c799337531e6b16086e8105906cc9f8808313/data/slogan-grammar.lisp | lisp | (sentence -> (or ("You get better inside" thing ".")
("Stay cool with" thing ".")
("For the love of" thing ".")
(thing "for a professional image.")
(thing ", stay in touch.")
(thing "'s got it all!")
("I quit smoking with" thing ".")
("Everyone loves" thing ".")
("High life with" thing ".")
("I believe in" thing ".")
(thing "is a never ending story.")
("Don't get in the way of" thing ".")
("The goddess made" thing ".")
(thing "is what the world was waiting for.")
(thing "after a long day.")
("One goal, one passion -" thing ".")
("Enjoy" thing ".")
(thing "- simplified!")
("Everything is simple with" thing ".")
(thing "for you!")
("I wish I had a" thing ".")
("Think different, think" thing ".")
("The president buys" thing ".")
("Make the world a better place with" thing ".")
(thing "evolution.")
("Feel it -" thing "!")
(thing "... whatever you want.")
("The Power of" thing ".")
(thing "innovate your world")
("My" thing "beats everything.")
("Let's" thing "!")
("The" thing "spirit.")
("I trust" thing ".")
(thing ", one for all.")
("Be young, have fun, taste" thing)
(thing ", created by nature.")
(thing "for the masses.")
(thing ", good.")
(thing "wanted.")
("No" thing ", no kiss.")
("The ideal" thing)
(thing ", pure lust.")
("I lost weight with" thing)
("Connect with" thing)
("The" thing "community.")
("Way to go," thing "!")
(thing "Dreamteam.")
("Go far with" thing)
("Live" thing)
("Don't worry," thing "takes care.")
("It's my" thing "!")
(thing "for your kids!")
("Do you know" thing "?")
("Go farther with" thing)
(thing "- your game.")
(thing "for a professional image.")))
| |
5f485079848cf5f03596ad9da0c857f63aa562db99c00a859e65187c99b5910d | gfredericks/test.chuck | generators_test.cljc | (ns com.gfredericks.test.chuck.generators-test
(:require [clojure.test.check.clojure-test :refer [defspec]]
[clojure.test.check.generators :as gen]
[#?(:clj clj-time.core :cljs cljs-time.core) :as ct]
[clojure.test.check.properties :as prop]
[com.gfredericks.test.chuck.generators :as gen']))
(def lists-and-counts
(gen'/for [nums (gen/vector gen/nat)
:let [cardinality (count nums)]]
[nums cardinality]))
(defspec for-works-correctly 100
(prop/for-all [[nums cardinality] lists-and-counts]
(= (count nums) cardinality)))
(defspec for-accepts-empty-bindings 100
(prop/for-all [x (gen'/for [] 42)]
(= x 42)))
(def lists-with-two-of-their-elements
(gen'/for [nums (gen/vector gen/nat)
:let [cardinality (count nums)]
:when (> cardinality 1)
x (gen/elements nums)
:let [[befores [_x & afters]] (split-with #(not= % x) nums)
nums-x (concat befores afters)]
y (gen/elements nums-x)]
[nums x y]))
(defspec complex-for-works-correctly 100
(prop/for-all [[nums x y] lists-with-two-of-their-elements]
(let [f (frequencies nums)]
;; check that both x and y are in the list
(or (and (= x y) (> (f x) 1))
(and (not= x y) (pos? (f x)) (pos? (f y)))))))
(def destructuring-usage
(gen'/for [{:keys [foo]} (gen/hash-map :foo gen/nat)
:let [unused-binding 42]
vs (gen/vector gen/boolean foo)]
[foo vs]))
(defspec destructuring-usage-spec 100
(prop/for-all [[n vs] destructuring-usage]
(= n (count vs))))
(def parallel-usage
(gen'/for [:parallel [x gen/nat
y gen/boolean]]
[x y]))
(defspec parallel-usage-spec 100
(prop/for-all [[x y] parallel-usage]
(and (>= x 0)
(or (= true y) (= false y)))))
(def parallel-as-second-clause
(gen'/for [n gen/nat
:parallel [v1 (gen/vector gen/boolean n)
v2 (gen/vector gen/boolean n)]]
[n (concat v1 v2)]))
(defspec parallel-as-second-clause-spec 100
(prop/for-all [[n v] parallel-as-second-clause]
(= (* 2 n) (count v))))
(defspec bounded-int-generates-bounded-ints 500
(let [large-int (gen/choose -200000000 200000000)
g (gen/bind (gen/tuple large-int large-int)
(fn [pair]
(let [[low high] (sort pair)]
(gen/tuple (gen/return low)
(gen/return high)
(gen'/bounded-int low high)))))]
(prop/for-all [[low high n] g]
(<= low n high))))
; TODO: improve the cljs tests for gen'/double
(defspec double-generates-doubles 100
(prop/for-all [x gen'/double]
#?(:clj (instance? Double x)
:cljs (= js/Number (type x)))))
(defspec subset-in-set 100
(prop/for-all [s (gen'/subset (range 10))]
(every? (set (range 10)) s)))
(defn subsequence?
"Checks if xs is a subsequence of ys."
[xs ys]
(or (empty? xs)
(and (seq ys)
(= (first xs) (first ys))
(subsequence? (rest xs) (rest ys)))
(and (seq ys)
(subsequence? xs (rest ys)))))
(def subsequence-gen
(gen'/for [ys (gen/list gen/nat)
xs (gen'/subsequence ys)]
[xs ys]))
(defspec subsequence-spec 100
(prop/for-all [[xs ys] subsequence-gen]
(subsequence? xs ys)))
(def sub-map-gen
(gen'/for [m (gen/map gen/string-alphanumeric gen/nat)
sm (gen'/sub-map m)]
[m sm]))
(defspec sub-map-spec 100
(prop/for-all [[m sm] sub-map-gen]
(every? #(= (find m (key %))
%)
sm)))
(defspec datetime-spec 100000
(prop/for-all [dt (gen'/datetime {:offset-min 0
:offset-max 100
:offset-fns [ct/millis ct/seconds ct/minutes ct/hours ct/days ct/months]})]
(ct/within? (ct/date-time 2000)
(ct/date-time 2009)
dt)))
(defn valid-bounded-rec-struct?
[breadth height coll]
(if (not-any? coll? coll)
(and (<= (count coll) breadth)
(or (zero? height) (pos? height)))
(and (<= (count coll) breadth)
(every? identity (map (partial valid-bounded-rec-struct?
breadth
(dec height))
coll)))))
(defspec bounded-recursive-gen-spec 100
(prop/for-all
[bounded-rec (gen'/bounded-recursive-gen gen/vector
gen/int
10
5)]
(valid-bounded-rec-struct? 10 5 bounded-rec)))
| null | https://raw.githubusercontent.com/gfredericks/test.chuck/9f6f33db6cc1ac8b172f20a45e8f13e34ac3c6f2/test/com/gfredericks/test/chuck/generators_test.cljc | clojure | check that both x and y are in the list
TODO: improve the cljs tests for gen'/double | (ns com.gfredericks.test.chuck.generators-test
(:require [clojure.test.check.clojure-test :refer [defspec]]
[clojure.test.check.generators :as gen]
[#?(:clj clj-time.core :cljs cljs-time.core) :as ct]
[clojure.test.check.properties :as prop]
[com.gfredericks.test.chuck.generators :as gen']))
(def lists-and-counts
(gen'/for [nums (gen/vector gen/nat)
:let [cardinality (count nums)]]
[nums cardinality]))
(defspec for-works-correctly 100
(prop/for-all [[nums cardinality] lists-and-counts]
(= (count nums) cardinality)))
(defspec for-accepts-empty-bindings 100
(prop/for-all [x (gen'/for [] 42)]
(= x 42)))
(def lists-with-two-of-their-elements
(gen'/for [nums (gen/vector gen/nat)
:let [cardinality (count nums)]
:when (> cardinality 1)
x (gen/elements nums)
:let [[befores [_x & afters]] (split-with #(not= % x) nums)
nums-x (concat befores afters)]
y (gen/elements nums-x)]
[nums x y]))
(defspec complex-for-works-correctly 100
(prop/for-all [[nums x y] lists-with-two-of-their-elements]
(let [f (frequencies nums)]
(or (and (= x y) (> (f x) 1))
(and (not= x y) (pos? (f x)) (pos? (f y)))))))
(def destructuring-usage
(gen'/for [{:keys [foo]} (gen/hash-map :foo gen/nat)
:let [unused-binding 42]
vs (gen/vector gen/boolean foo)]
[foo vs]))
(defspec destructuring-usage-spec 100
(prop/for-all [[n vs] destructuring-usage]
(= n (count vs))))
(def parallel-usage
(gen'/for [:parallel [x gen/nat
y gen/boolean]]
[x y]))
(defspec parallel-usage-spec 100
(prop/for-all [[x y] parallel-usage]
(and (>= x 0)
(or (= true y) (= false y)))))
(def parallel-as-second-clause
(gen'/for [n gen/nat
:parallel [v1 (gen/vector gen/boolean n)
v2 (gen/vector gen/boolean n)]]
[n (concat v1 v2)]))
(defspec parallel-as-second-clause-spec 100
(prop/for-all [[n v] parallel-as-second-clause]
(= (* 2 n) (count v))))
(defspec bounded-int-generates-bounded-ints 500
(let [large-int (gen/choose -200000000 200000000)
g (gen/bind (gen/tuple large-int large-int)
(fn [pair]
(let [[low high] (sort pair)]
(gen/tuple (gen/return low)
(gen/return high)
(gen'/bounded-int low high)))))]
(prop/for-all [[low high n] g]
(<= low n high))))
(defspec double-generates-doubles 100
(prop/for-all [x gen'/double]
#?(:clj (instance? Double x)
:cljs (= js/Number (type x)))))
(defspec subset-in-set 100
(prop/for-all [s (gen'/subset (range 10))]
(every? (set (range 10)) s)))
(defn subsequence?
"Checks if xs is a subsequence of ys."
[xs ys]
(or (empty? xs)
(and (seq ys)
(= (first xs) (first ys))
(subsequence? (rest xs) (rest ys)))
(and (seq ys)
(subsequence? xs (rest ys)))))
(def subsequence-gen
(gen'/for [ys (gen/list gen/nat)
xs (gen'/subsequence ys)]
[xs ys]))
(defspec subsequence-spec 100
(prop/for-all [[xs ys] subsequence-gen]
(subsequence? xs ys)))
(def sub-map-gen
(gen'/for [m (gen/map gen/string-alphanumeric gen/nat)
sm (gen'/sub-map m)]
[m sm]))
(defspec sub-map-spec 100
(prop/for-all [[m sm] sub-map-gen]
(every? #(= (find m (key %))
%)
sm)))
(defspec datetime-spec 100000
(prop/for-all [dt (gen'/datetime {:offset-min 0
:offset-max 100
:offset-fns [ct/millis ct/seconds ct/minutes ct/hours ct/days ct/months]})]
(ct/within? (ct/date-time 2000)
(ct/date-time 2009)
dt)))
(defn valid-bounded-rec-struct?
[breadth height coll]
(if (not-any? coll? coll)
(and (<= (count coll) breadth)
(or (zero? height) (pos? height)))
(and (<= (count coll) breadth)
(every? identity (map (partial valid-bounded-rec-struct?
breadth
(dec height))
coll)))))
(defspec bounded-recursive-gen-spec 100
(prop/for-all
[bounded-rec (gen'/bounded-recursive-gen gen/vector
gen/int
10
5)]
(valid-bounded-rec-struct? 10 5 bounded-rec)))
|
08c4729eeda676b4b1c94b2253771b2296fedf0d8b92429599420aa93a0fbf8f | namin/inc | compiler-tests.scm | (load "compiler.scm")
(when enable-boot-tests
(unless enable-cps
(load "tests-6.7-req.scm")
(load "tests-6.6-req.scm")
(load "tests-6.5-req.scm")
(load "tests-6.4-req.scm"))
(load "tests-6.4.2-req.scm")
(load "tests-6.4.1-req.scm")
(load "tests-6.3-req.scm")
(load "tests-6.2-req.scm")
(load "tests-6.1-req.scm"))
(when enable-cps
(load "tests-5.3-req.scm"))
(load "tests-5.2-req.scm")
(load "tests-4.2-req.scm")
(load "tests-4.1-req.scm")
(load "tests-3.4-req.scm")
(load "tests-3.3-req.scm")
(load "tests-3.2-req.scm")
(load "tests-3.1-req.scm")
(load "tests-2.9-req.scm")
(load "tests-2.8-req.scm")
(load "tests-2.6-req.scm")
(load "tests-2.4-req.scm")
(load "tests-2.3-req.scm")
(load "tests-2.2-req.scm")
(load "tests-2.1-req.scm")
(load "tests-1.9-req.scm")
(load "tests-1.8-req.scm")
(load "tests-1.7-req.scm")
(load "tests-1.6-opt.scm")
(load "tests-1.6-req.scm")
(load "tests-1.5-req.scm")
(load "tests-1.4-req.scm")
(load "tests-1.3-req.scm")
(load "tests-1.2-req.scm")
(load "tests-1.1-req.scm")
| null | https://raw.githubusercontent.com/namin/inc/3f683935e290848485f8d4d165a4f727f6658d1d/src/compiler-tests.scm | scheme | (load "compiler.scm")
(when enable-boot-tests
(unless enable-cps
(load "tests-6.7-req.scm")
(load "tests-6.6-req.scm")
(load "tests-6.5-req.scm")
(load "tests-6.4-req.scm"))
(load "tests-6.4.2-req.scm")
(load "tests-6.4.1-req.scm")
(load "tests-6.3-req.scm")
(load "tests-6.2-req.scm")
(load "tests-6.1-req.scm"))
(when enable-cps
(load "tests-5.3-req.scm"))
(load "tests-5.2-req.scm")
(load "tests-4.2-req.scm")
(load "tests-4.1-req.scm")
(load "tests-3.4-req.scm")
(load "tests-3.3-req.scm")
(load "tests-3.2-req.scm")
(load "tests-3.1-req.scm")
(load "tests-2.9-req.scm")
(load "tests-2.8-req.scm")
(load "tests-2.6-req.scm")
(load "tests-2.4-req.scm")
(load "tests-2.3-req.scm")
(load "tests-2.2-req.scm")
(load "tests-2.1-req.scm")
(load "tests-1.9-req.scm")
(load "tests-1.8-req.scm")
(load "tests-1.7-req.scm")
(load "tests-1.6-opt.scm")
(load "tests-1.6-req.scm")
(load "tests-1.5-req.scm")
(load "tests-1.4-req.scm")
(load "tests-1.3-req.scm")
(load "tests-1.2-req.scm")
(load "tests-1.1-req.scm")
| |
728ad90a50e46c79b4d0e25a8c4e78426f0be7192bea814a8c72b0412f91bcc8 | wireless-net/erlang-nommu | sshc_sup.erl | %%
%% %CopyrightBegin%
%%
Copyright Ericsson AB 2008 - 2013 . All Rights Reserved .
%%
The contents of this file are subject to the Erlang Public License ,
Version 1.1 , ( the " License " ) ; you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at /.
%%
Software distributed under the License is distributed on an " AS IS "
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% %CopyrightEnd%
%%
%%
%%----------------------------------------------------------------------
%% Purpose: The ssh client subsystem supervisor
%%----------------------------------------------------------------------
-module(sshc_sup).
-behaviour(supervisor).
-export([start_link/1, start_child/1, stop_child/1]).
%% Supervisor callback
-export([init/1]).
%%%=========================================================================
%%% API
%%%=========================================================================
start_link(Args) ->
supervisor:start_link({local, ?MODULE}, ?MODULE, [Args]).
start_child(Args) ->
supervisor:start_child(?MODULE, Args).
stop_child(Client) ->
spawn(fun() ->
ClientSup = whereis(?MODULE),
supervisor:terminate_child(ClientSup, Client)
end),
ok.
%%%=========================================================================
%%% Supervisor callback
%%%=========================================================================
init(Args) ->
RestartStrategy = simple_one_for_one,
MaxR = 0,
MaxT = 3600,
{ok, {{RestartStrategy, MaxR, MaxT}, [child_spec(Args)]}}.
%%%=========================================================================
%%% Internal functions
%%%=========================================================================
child_spec(_) ->
Name = undefined, % As simple_one_for_one is used.
StartFunc = {ssh_connection_handler, start_link, []},
Restart = temporary,
Shutdown = infinity,
Modules = [ssh_connection_handler],
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
| null | https://raw.githubusercontent.com/wireless-net/erlang-nommu/79f32f81418e022d8ad8e0e447deaea407289926/lib/ssh/src/sshc_sup.erl | erlang |
%CopyrightBegin%
compliance with the License. You should have received a copy of the
Erlang Public License along with this software. If not, it can be
retrieved online at /.
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
the License for the specific language governing rights and limitations
under the License.
%CopyrightEnd%
----------------------------------------------------------------------
Purpose: The ssh client subsystem supervisor
----------------------------------------------------------------------
Supervisor callback
=========================================================================
API
=========================================================================
=========================================================================
Supervisor callback
=========================================================================
=========================================================================
Internal functions
=========================================================================
As simple_one_for_one is used. | Copyright Ericsson AB 2008 - 2013 . All Rights Reserved .
The contents of this file are subject to the Erlang Public License ,
Version 1.1 , ( the " License " ) ; you may not use this file except in
Software distributed under the License is distributed on an " AS IS "
-module(sshc_sup).
-behaviour(supervisor).
-export([start_link/1, start_child/1, stop_child/1]).
-export([init/1]).
start_link(Args) ->
supervisor:start_link({local, ?MODULE}, ?MODULE, [Args]).
start_child(Args) ->
supervisor:start_child(?MODULE, Args).
stop_child(Client) ->
spawn(fun() ->
ClientSup = whereis(?MODULE),
supervisor:terminate_child(ClientSup, Client)
end),
ok.
init(Args) ->
RestartStrategy = simple_one_for_one,
MaxR = 0,
MaxT = 3600,
{ok, {{RestartStrategy, MaxR, MaxT}, [child_spec(Args)]}}.
child_spec(_) ->
StartFunc = {ssh_connection_handler, start_link, []},
Restart = temporary,
Shutdown = infinity,
Modules = [ssh_connection_handler],
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
|
8b5a0f1e9a8d47a1d594b2eae89085801a0f80047078c777d074ff910503b093 | screenshotbot/screenshotbot-oss | test-auth.lisp | (defpackage :test-auth
(:use :cl
:fiveam)
(:import-from :util/testing
:with-fake-request)
(:import-from :auth
:fix-cookie-domain
#+windows
:read-windows-seed)
(:import-from #:util/store
#:with-test-store)
(:export))
(in-package :test-auth)
(def-suite* :test-auth)
(def-fixture state ()
(with-test-store ()
(with-fake-request ()
(&body))))
(test auth-simple-test
(with-fixture state ()
(auth:with-sessions ()
(is-true (auth:current-session))
(is (equal (auth:current-session)
(auth:current-session))))))
(test simple-key-val
(with-fixture state ()
(auth:with-sessions ()
(setf (auth:session-value :name) 33)
(is (equal 33 (auth:session-value :name)))
(setf (auth:session-value :name) 44)
(is (equal 44 (auth:session-value :name))))))
#+windows
(test read-windows-seed
(is-true (read-windows-seed)))
(test fix-cookie-domain
(is (equal "foo.com" (fix-cookie-domain "foo.com")))
(is (equal "localhost" (fix-cookie-domain "localhost")))
(is (equal "192.168.1.119" (fix-cookie-domain "192.168.1.119")))
;; We used to map www.foo.com to foo.com, but that logic is hard to
;; maintain
(is (equal "www.foo.com" (fix-cookie-domain "www.foo.com")))
(is (equal "192.168.1.120" (fix-cookie-domain "192.168.1.120"))))
| null | https://raw.githubusercontent.com/screenshotbot/screenshotbot-oss/5e510b43a0c2f28db26b0d68f1fbf8fe7a399381/src/auth/test-auth.lisp | lisp | We used to map www.foo.com to foo.com, but that logic is hard to
maintain | (defpackage :test-auth
(:use :cl
:fiveam)
(:import-from :util/testing
:with-fake-request)
(:import-from :auth
:fix-cookie-domain
#+windows
:read-windows-seed)
(:import-from #:util/store
#:with-test-store)
(:export))
(in-package :test-auth)
(def-suite* :test-auth)
(def-fixture state ()
(with-test-store ()
(with-fake-request ()
(&body))))
(test auth-simple-test
(with-fixture state ()
(auth:with-sessions ()
(is-true (auth:current-session))
(is (equal (auth:current-session)
(auth:current-session))))))
(test simple-key-val
(with-fixture state ()
(auth:with-sessions ()
(setf (auth:session-value :name) 33)
(is (equal 33 (auth:session-value :name)))
(setf (auth:session-value :name) 44)
(is (equal 44 (auth:session-value :name))))))
#+windows
(test read-windows-seed
(is-true (read-windows-seed)))
(test fix-cookie-domain
(is (equal "foo.com" (fix-cookie-domain "foo.com")))
(is (equal "localhost" (fix-cookie-domain "localhost")))
(is (equal "192.168.1.119" (fix-cookie-domain "192.168.1.119")))
(is (equal "www.foo.com" (fix-cookie-domain "www.foo.com")))
(is (equal "192.168.1.120" (fix-cookie-domain "192.168.1.120"))))
|
c7da048220b5cb80e5d1f542162986983aaa264c7c040807aaa87f1e5990f8c0 | lambe-lang/compiler | expr.mli | open Lambe_ast
module Render : sig
val pp : Format.formatter -> 'a Expr.t -> unit
val check : Format.formatter -> 'a Expr.t -> 'a Type.t option -> unit
end
| null | https://raw.githubusercontent.com/lambe-lang/compiler/79d7937c06ca30e231855ec4ce99012ca0395cd5/lib/render/expr.mli | ocaml | open Lambe_ast
module Render : sig
val pp : Format.formatter -> 'a Expr.t -> unit
val check : Format.formatter -> 'a Expr.t -> 'a Type.t option -> unit
end
| |
485c2e0caee84018f745ec0c0a8be6cf50d661754fa64940545d5d5b33cd9405 | fukamachi/qlot | quicklisp.lisp | (defpackage #:qlot/install/quicklisp
(:use #:cl)
(:import-from #:qlot/logger
#:message)
(:import-from #:qlot/proxy
#:*proxy*)
(:import-from #:qlot/utils
#:generate-random-string)
(:import-from #:qlot/utils/shell
#:run-lisp)
(:import-from #:qlot/utils/tmp
#:with-tmp-directory)
(:export #:install-quicklisp))
(in-package #:qlot/install/quicklisp)
(defun fetch-installer (to)
(let ((quicklisp-file (if (uiop:directory-pathname-p to)
(merge-pathnames (format nil "quicklisp-~A.lisp"
(generate-random-string))
to)
to)))
(progv (list (intern #.(string '#:*proxy-url*) '#:ql-http))
(list *proxy*)
(uiop:symbol-call '#:ql-http '#:http-fetch
""
quicklisp-file))
quicklisp-file))
(defun install-quicklisp (path)
(message "Installing Quicklisp to ~A ..." path)
(with-tmp-directory (tmp-dir)
(let ((quicklisp-file (fetch-installer tmp-dir)))
(run-lisp (list
`(let ((*standard-output* (make-broadcast-stream)))
(load ,quicklisp-file))
"(setf quicklisp-quickstart:*after-initial-setup-message* \"\")"
(format nil "(let ((*standard-output* (make-broadcast-stream)) (*trace-output* (make-broadcast-stream))) (quicklisp-quickstart:install :path #P\"~A\"~@[ :proxy \"~A\"~]))"
path
*proxy*))
:without-quicklisp t)
t)))
| null | https://raw.githubusercontent.com/fukamachi/qlot/96c40e6e6193f4bcbc61fe23aee98916347e2d94/install/quicklisp.lisp | lisp | (defpackage #:qlot/install/quicklisp
(:use #:cl)
(:import-from #:qlot/logger
#:message)
(:import-from #:qlot/proxy
#:*proxy*)
(:import-from #:qlot/utils
#:generate-random-string)
(:import-from #:qlot/utils/shell
#:run-lisp)
(:import-from #:qlot/utils/tmp
#:with-tmp-directory)
(:export #:install-quicklisp))
(in-package #:qlot/install/quicklisp)
(defun fetch-installer (to)
(let ((quicklisp-file (if (uiop:directory-pathname-p to)
(merge-pathnames (format nil "quicklisp-~A.lisp"
(generate-random-string))
to)
to)))
(progv (list (intern #.(string '#:*proxy-url*) '#:ql-http))
(list *proxy*)
(uiop:symbol-call '#:ql-http '#:http-fetch
""
quicklisp-file))
quicklisp-file))
(defun install-quicklisp (path)
(message "Installing Quicklisp to ~A ..." path)
(with-tmp-directory (tmp-dir)
(let ((quicklisp-file (fetch-installer tmp-dir)))
(run-lisp (list
`(let ((*standard-output* (make-broadcast-stream)))
(load ,quicklisp-file))
"(setf quicklisp-quickstart:*after-initial-setup-message* \"\")"
(format nil "(let ((*standard-output* (make-broadcast-stream)) (*trace-output* (make-broadcast-stream))) (quicklisp-quickstart:install :path #P\"~A\"~@[ :proxy \"~A\"~]))"
path
*proxy*))
:without-quicklisp t)
t)))
| |
9290b03accea4304ffb14d0a1773bc65a266f4af5d67ee7094ecb09a4f8f72a1 | Clozure/ccl-tests | floor.lsp | ;-*- Mode: Lisp -*-
Author :
Created : Mon Aug 4 22:16:00 2003
;;;; Contains: Tests of FLOOR
(in-package :cl-test)
(compile-and-load "numbers-aux.lsp")
(compile-and-load "floor-aux.lsp")
;;; Error tests
(deftest floor.error.1
(signals-error (floor) program-error)
t)
(deftest floor.error.2
(signals-error (floor 1.0 1 nil) program-error)
t)
;;; Non-error tests
(deftest floor.1
(floor.1-fn)
nil)
(deftest floor.2
(floor.2-fn)
nil)
(deftest floor.3
(floor.3-fn 2.0s4)
nil)
(deftest floor.4
(floor.3-fn 2.0f4)
nil)
(deftest floor.5
(floor.3-fn 2.0d4)
nil)
(deftest floor.6
(floor.3-fn 2.0l4)
nil)
(deftest floor.7
(floor.7-fn)
nil)
(deftest floor.8
(floor.8-fn)
nil)
(deftest floor.9
(floor.9-fn)
nil)
(deftest floor.10
(loop for x in (remove-if #'zerop *reals*)
for (q r) = (multiple-value-list (floor x x))
unless (and (eql q 1)
(zerop r)
(if (rationalp x) (eql r 0)
(eql r (float 0 x))))
collect x)
nil)
(deftest floor.11
(loop for x in (remove-if #'zerop *reals*)
for (q r) = (multiple-value-list (floor (- x) x))
unless (and (eql q -1)
(zerop r)
(if (rationalp x) (eql r 0)
(eql r (float 0 x))))
collect x)
nil)
(deftest floor.12
(let* ((radix (float-radix 1.0s0))
(rad (float radix 1.0s0))
(rrad (/ 1.0s0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.13
(let* ((radix (float-radix 1.0s0))
(rad (float radix 1.0s0))
(rrad (/ 1.0s0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.14
(let* ((radix (float-radix 1.0f0))
(rad (float radix 1.0f0))
(rrad (/ 1.0f0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.15
(let* ((radix (float-radix 1.0f0))
(rad (float radix 1.0f0))
(rrad (/ 1.0f0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.16
(let* ((radix (float-radix 1.0d0))
(rad (float radix 1.0d0))
(rrad (/ 1.0d0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.17
(let* ((radix (float-radix 1.0d0))
(rad (float radix 1.0d0))
(rrad (/ 1.0d0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.18
(let* ((radix (float-radix 1.0l0))
(rad (float radix 1.0l0))
(rrad (/ 1.0l0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.19
(let* ((radix (float-radix 1.0l0))
(rad (float radix 1.0l0))
(rrad (/ 1.0l0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
;;; To add: tests that involve adding/subtracting EPSILON constants
;;; (suitably scaled) to floated integers.
| null | https://raw.githubusercontent.com/Clozure/ccl-tests/0478abddb34dbc16487a1975560d8d073a988060/ansi-tests/floor.lsp | lisp | -*- Mode: Lisp -*-
Contains: Tests of FLOOR
Error tests
Non-error tests
To add: tests that involve adding/subtracting EPSILON constants
(suitably scaled) to floated integers. | Author :
Created : Mon Aug 4 22:16:00 2003
(in-package :cl-test)
(compile-and-load "numbers-aux.lsp")
(compile-and-load "floor-aux.lsp")
(deftest floor.error.1
(signals-error (floor) program-error)
t)
(deftest floor.error.2
(signals-error (floor 1.0 1 nil) program-error)
t)
(deftest floor.1
(floor.1-fn)
nil)
(deftest floor.2
(floor.2-fn)
nil)
(deftest floor.3
(floor.3-fn 2.0s4)
nil)
(deftest floor.4
(floor.3-fn 2.0f4)
nil)
(deftest floor.5
(floor.3-fn 2.0d4)
nil)
(deftest floor.6
(floor.3-fn 2.0l4)
nil)
(deftest floor.7
(floor.7-fn)
nil)
(deftest floor.8
(floor.8-fn)
nil)
(deftest floor.9
(floor.9-fn)
nil)
(deftest floor.10
(loop for x in (remove-if #'zerop *reals*)
for (q r) = (multiple-value-list (floor x x))
unless (and (eql q 1)
(zerop r)
(if (rationalp x) (eql r 0)
(eql r (float 0 x))))
collect x)
nil)
(deftest floor.11
(loop for x in (remove-if #'zerop *reals*)
for (q r) = (multiple-value-list (floor (- x) x))
unless (and (eql q -1)
(zerop r)
(if (rationalp x) (eql r 0)
(eql r (float 0 x))))
collect x)
nil)
(deftest floor.12
(let* ((radix (float-radix 1.0s0))
(rad (float radix 1.0s0))
(rrad (/ 1.0s0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.13
(let* ((radix (float-radix 1.0s0))
(rad (float radix 1.0s0))
(rrad (/ 1.0s0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.14
(let* ((radix (float-radix 1.0f0))
(rad (float radix 1.0f0))
(rrad (/ 1.0f0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.15
(let* ((radix (float-radix 1.0f0))
(rad (float radix 1.0f0))
(rrad (/ 1.0f0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.16
(let* ((radix (float-radix 1.0d0))
(rad (float radix 1.0d0))
(rrad (/ 1.0d0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.17
(let* ((radix (float-radix 1.0d0))
(rad (float radix 1.0d0))
(rrad (/ 1.0d0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.18
(let* ((radix (float-radix 1.0l0))
(rad (float radix 1.0l0))
(rrad (/ 1.0l0 rad)))
(loop for i from 1 to 1000
for x = (+ i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q i)
(eql r rrad))
collect (list i x q r)))
nil)
(deftest floor.19
(let* ((radix (float-radix 1.0l0))
(rad (float radix 1.0l0))
(rrad (/ 1.0l0 rad)))
(loop for i from 1 to 1000
for x = (- i rrad)
for (q r) = (multiple-value-list (floor x))
unless (and (eql q (1- i))
(eql r rrad))
collect (list i x q r)))
nil)
|
3bc1d845ea92ba394ad09065607f41f79c2937431ecbdae30c80ccee1f8f4061 | luizalbertocviana/haskell-experiments | Memo.hs | module Memo where
import Prelude (Bool, Int, (+), (-), (*), (==),
rem, div, (<$>))
import Bool
import Functions
import InfinityTree
memo :: (Int -> b) -> Int -> b
memo f = find ftree where
natTree = Branch 0 oddTree evenTree where
oddTree = ((+1) . (*2)) <$> natTree
evenTree = ((+2) . (*2)) <$> natTree
ftree = f <$> natTree
fib :: Int -> Int
fib = fix (memo . f) where
f _ 0 = 0
f _ 1 = 1
f rec n = rec (n - 1) + rec (n - 2)
| null | https://raw.githubusercontent.com/luizalbertocviana/haskell-experiments/eee61245316e3d78819f88b0e60e0c3a3374483e/Memo.hs | haskell | module Memo where
import Prelude (Bool, Int, (+), (-), (*), (==),
rem, div, (<$>))
import Bool
import Functions
import InfinityTree
memo :: (Int -> b) -> Int -> b
memo f = find ftree where
natTree = Branch 0 oddTree evenTree where
oddTree = ((+1) . (*2)) <$> natTree
evenTree = ((+2) . (*2)) <$> natTree
ftree = f <$> natTree
fib :: Int -> Int
fib = fix (memo . f) where
f _ 0 = 0
f _ 1 = 1
f rec n = rec (n - 1) + rec (n - 2)
| |
472ccbf1eec53b45c41caff46ecd28b76a3394b75a15d047569d880beb892f7b | PrecursorApp/precursor | blue_ocean_made_of_ink.clj | (ns pc.views.blog.blue-ocean-made-of-ink)
(defn blue-ocean-made-of-ink []
{:title "Our blue ocean is made of ink."
:blurb "For every single designer or developer using an app to prototype their product, there are hundreds more using paper..."
:author "Danny"
:body
(list
[:article
[:p "Following our unexpected "
[:a {:href "/blog/product-hunt-wake-up-call"} "Product Hunt launch"]
", we've received a lot of feedback from users.
I've found the positive comments to be inspiring, and critical ones motivating.
We've even been receiving encouragement from founders of more popular prototyping apps.
This type of praise begs the question, \"Aren't they competition?\""]]
[:article
[:blockquote "Our true competition is not the small trickle of non-Tesla electric cars being produced, but rather the enormous flood of gasoline cars pouring out of the world's factories every day."]]
[:article
[:p [:a {:href "-our-patent-are-belong-you"} "Elon Musk"]
" said that about Telsa last year.
Prototyping stands on similar shores.
For every single designer or developer using an app to prototype their product, there are hundreds more using paper."]]
[:article
[:h3 "Can we compete with paper?"]
[:p "I don't handwrite letters, I won't pay bills with checks, and I never unfold maps to get around.
So why is pen and paper still so utterly ingrained in the traditional design process?
In my case, it's often tied to a desire for more discipline.
Its constraints make pen and paper simple, efficient, and accessible.
So that's where we'll start."]]
[:article
[:h3 "Simplicity—a good start."]
[:p "Pen and paper demands no conscious effort.
So competing here requires a minimum level of cognitive load.
This should explain our lean interface and modest amount of customization options.
Our constraints help us design productive behaviors.
E.g., monotone shapes make emphasizing collaborator activity simple with color."]]
[:article
[:h3 "Efficiency—almost there."]
[:p "I often retreat to pen and paper after finding other prototyping tools to be unwieldy.
By contrast, Precursor's agility has kept me from reaching for a pen for months.
Its precision helps me review old prototypes even after I've forgotten the context.
I save time early in my design process so I can polish more in production."]]
[:article
[:h3 "Accessibility—done."]
[:p "Collaboration is the Precursor flagship.
Handing someone a notebook is easy, but no easier than handing them a phone or a tablet.
Precursor takes the advantage by connecting collaborators around the world.
Entire product teams can be communicating and designing concepts together in real time, within seconds."]]
[:article
[:h3 "We're more productive without pen and paper."]
[:p "Paper inevitably gets damaged or forgotten.
I tried justifying that by looking at my sketches as disposable, but now I'm starting to realize this isn't true.
Whenever I scroll through past prototypes on Precursor, I learn something new about my process.
Consider the implications of a system like that for a moment.
Imagine recalling every drawing or doodle you ever scratched on a piece of paper.
How amazing would that be?"]]
)})
| null | https://raw.githubusercontent.com/PrecursorApp/precursor/30202e40365f6883c4767e423d6299f0d13dc528/src/pc/views/blog/blue_ocean_made_of_ink.clj | clojure | (ns pc.views.blog.blue-ocean-made-of-ink)
(defn blue-ocean-made-of-ink []
{:title "Our blue ocean is made of ink."
:blurb "For every single designer or developer using an app to prototype their product, there are hundreds more using paper..."
:author "Danny"
:body
(list
[:article
[:p "Following our unexpected "
[:a {:href "/blog/product-hunt-wake-up-call"} "Product Hunt launch"]
", we've received a lot of feedback from users.
I've found the positive comments to be inspiring, and critical ones motivating.
We've even been receiving encouragement from founders of more popular prototyping apps.
This type of praise begs the question, \"Aren't they competition?\""]]
[:article
[:blockquote "Our true competition is not the small trickle of non-Tesla electric cars being produced, but rather the enormous flood of gasoline cars pouring out of the world's factories every day."]]
[:article
[:p [:a {:href "-our-patent-are-belong-you"} "Elon Musk"]
" said that about Telsa last year.
Prototyping stands on similar shores.
For every single designer or developer using an app to prototype their product, there are hundreds more using paper."]]
[:article
[:h3 "Can we compete with paper?"]
[:p "I don't handwrite letters, I won't pay bills with checks, and I never unfold maps to get around.
So why is pen and paper still so utterly ingrained in the traditional design process?
In my case, it's often tied to a desire for more discipline.
Its constraints make pen and paper simple, efficient, and accessible.
So that's where we'll start."]]
[:article
[:h3 "Simplicity—a good start."]
[:p "Pen and paper demands no conscious effort.
So competing here requires a minimum level of cognitive load.
This should explain our lean interface and modest amount of customization options.
Our constraints help us design productive behaviors.
E.g., monotone shapes make emphasizing collaborator activity simple with color."]]
[:article
[:h3 "Efficiency—almost there."]
[:p "I often retreat to pen and paper after finding other prototyping tools to be unwieldy.
By contrast, Precursor's agility has kept me from reaching for a pen for months.
Its precision helps me review old prototypes even after I've forgotten the context.
I save time early in my design process so I can polish more in production."]]
[:article
[:h3 "Accessibility—done."]
[:p "Collaboration is the Precursor flagship.
Handing someone a notebook is easy, but no easier than handing them a phone or a tablet.
Precursor takes the advantage by connecting collaborators around the world.
Entire product teams can be communicating and designing concepts together in real time, within seconds."]]
[:article
[:h3 "We're more productive without pen and paper."]
[:p "Paper inevitably gets damaged or forgotten.
I tried justifying that by looking at my sketches as disposable, but now I'm starting to realize this isn't true.
Whenever I scroll through past prototypes on Precursor, I learn something new about my process.
Consider the implications of a system like that for a moment.
Imagine recalling every drawing or doodle you ever scratched on a piece of paper.
How amazing would that be?"]]
)})
| |
bfbfa1f5d9b34894166859adc22b33c67c766f363864366e9d3e936d40c04f35 | bobzhang/fan | test_gram.ml | open Format;
open Camlp4.PreCast
let parser_of_entry entry s =
try Gram.parse entry (Loc.mk "<string>") (Stream.of_string s)
with
Loc.Exc_located(loc, e) -> begin
prerr_endline (Loc.to_string loc);
let start_bol,stop_bol,
start_off, stop_off =
Loc.(start_bol loc,
stop_bol loc,
start_off loc,
stop_off loc
) in
let abs_start_off = start_bol + start_off in
let abs_stop_off = stop_bol + stop_off in
let err_location = String.sub s abs_start_off
(abs_stop_off - abs_start_off + 1) in
prerr_endline (sprintf "err: ^%s^" err_location);
raise e ;
end
let expression = Gram.Entry.mk "expression"
let expression_eoi = Gram.Entry.mk "expression_eoi"
let _ = begin
EXTEND Gram GLOBAL: expression expression_eoi;
expression_eoi:
[ [x = expression ; `EOI -> x ] ];
expression:
[
"top"
[ x=SELF ;"+";y=SELF -> x+y
| x=SELF ;"-";y=SELF -> x-y]
"mult"
[ x=SELF ;"*";y=SELF -> x*y
| x=SELF ;"/";y=SELF -> x/y ]
|"simple"
[ "("; x = SELF; ")" -> x ]
];
END;
end
let parse_expression = parser_of_entry expression
let parse_expression_eoi = parser_of_entry expression_eoi
| null | https://raw.githubusercontent.com/bobzhang/fan/7ed527d96c5a006da43d3813f32ad8a5baa31b7f/src/todoml/test/test_gram.ml | ocaml | open Format;
open Camlp4.PreCast
let parser_of_entry entry s =
try Gram.parse entry (Loc.mk "<string>") (Stream.of_string s)
with
Loc.Exc_located(loc, e) -> begin
prerr_endline (Loc.to_string loc);
let start_bol,stop_bol,
start_off, stop_off =
Loc.(start_bol loc,
stop_bol loc,
start_off loc,
stop_off loc
) in
let abs_start_off = start_bol + start_off in
let abs_stop_off = stop_bol + stop_off in
let err_location = String.sub s abs_start_off
(abs_stop_off - abs_start_off + 1) in
prerr_endline (sprintf "err: ^%s^" err_location);
raise e ;
end
let expression = Gram.Entry.mk "expression"
let expression_eoi = Gram.Entry.mk "expression_eoi"
let _ = begin
EXTEND Gram GLOBAL: expression expression_eoi;
expression_eoi:
[ [x = expression ; `EOI -> x ] ];
expression:
[
"top"
[ x=SELF ;"+";y=SELF -> x+y
| x=SELF ;"-";y=SELF -> x-y]
"mult"
[ x=SELF ;"*";y=SELF -> x*y
| x=SELF ;"/";y=SELF -> x/y ]
|"simple"
[ "("; x = SELF; ")" -> x ]
];
END;
end
let parse_expression = parser_of_entry expression
let parse_expression_eoi = parser_of_entry expression_eoi
| |
838e055c67027001a51b5bc53d5d2fc2e550f2d5d6c4503fd07f2feceab8b25e | albertoruiz/easyVision | stand3.hs | import Vision.GUI
import Image
import Util.Misc(replaceAt)
main = runIt win
win = editor update save "editor" [2,4 .. 10] sh
where
sh k x = Draw [ color white $ text (Point 0 0) (show x)
, color yellow $ text (Point 0.9 0.8) ("# "++show k) ]
update = [ op (Char '+') succ
, op (Char '-') pred
, opS (Char 'P') (*10)
]
save = [(ctrlS, \_roi _pt (_k,xs) -> print xs)]
ctrlS = kCtrl (key (Char '\DC3'))
op c f = updateItem (key c) (const.const $ f)
opS c f = updateItem ((kShift . key) c) (const.const $ f)
| null | https://raw.githubusercontent.com/albertoruiz/easyVision/26bb2efaa676c902cecb12047560a09377a969f2/projects/tour/stand3.hs | haskell | import Vision.GUI
import Image
import Util.Misc(replaceAt)
main = runIt win
win = editor update save "editor" [2,4 .. 10] sh
where
sh k x = Draw [ color white $ text (Point 0 0) (show x)
, color yellow $ text (Point 0.9 0.8) ("# "++show k) ]
update = [ op (Char '+') succ
, op (Char '-') pred
, opS (Char 'P') (*10)
]
save = [(ctrlS, \_roi _pt (_k,xs) -> print xs)]
ctrlS = kCtrl (key (Char '\DC3'))
op c f = updateItem (key c) (const.const $ f)
opS c f = updateItem ((kShift . key) c) (const.const $ f)
| |
b0f275302614d2583e998c26ffeb73ad702398950d7ecb0842c3041634b5f079 | justinj/string-matching-visualization | boyer_moore.cljs | (ns strmatch.logic.boyer-moore
(:use [strmatch.logic.common :only [discrepancy-index]]))
The Boyer - Moore algorithm has a couple differences from KMP .
First , it begins trying to match from the right side , rather than the left .
Second , it uses two tables to calculate its jumps ( last occurrence and suffix ) , rather than just one .
; In any situation, it checks what jump either table would allow, and takes the better one.
It happens that the two tables tend to complement each other quite well , so large jumps are often possible .
;
;
; The last-occurrence checker takes a character and returns the last occurence
; of that character in the needle, or -1 if it does not occur.
; This is used with the haystack to push the needle past all the points where the current
; haystack character could not possibly occur.
; Example:
;
; needle: jey
; haystack: abjjey
; -> abjjey
; -> jey
;
First , we check the value at i = 2 for a match .
; We don't get one. Then we find the "last occurrence" of 'j' in our needle, and align that with the 'j':
;
; -> abjjey
; -> jey
;
; The same process repeats with the 'e', we align the 'e' with the last occurence:
; -> abjjey
; -> jey
;
; And find the match.
(defn last-occurrence
[string]
(fn [chr]
(.lastIndexOf string chr)))
(defn- matches-at
[string suffix index]
(let [chrs (vec string)]
(and
(or (neg? index)
(not (= (chrs index) (suffix 0))))
(every? (fn [i]
(or (neg? (+ i index))
(= (chrs (+ i index)) (suffix i))))
(range 1 (count suffix))))))
(defn- match-index
[string suffix]
(second
(first
(filter first
(map #(vector (matches-at string (vec suffix) %) %)
(range (- (count string) (count suffix)) (- -1 (count suffix)) -1))))))
(defn- good-suffix-for-index
[string index]
(let [suffix (drop index string)]
(match-index string suffix)))
; The suffix checker is constructed as follows:
;
; For index `i`, take `s`, the substring beginning at index `i`.
so if our string is NEEDLE , and i = 4 , we take ` s ` = " LE " .
; We then find the farthest right alignment of `s` with the needle,
such that the first character * does not * match , and the rest of ` s ` does .
;
; In this case, the alignment is:
; -> NEEDLE
; -> LE
;
; You could think of it as finding the largest index that [^L]E matches.
(defn good-suffix
[string]
(vec (map
#(good-suffix-for-index string %)
(range 0 (count string)))))
(defn- reverse-discrepancy-index
[needle haystack index]
(let [relevant-haystack (take (count needle) (drop index haystack))
reversed-index (discrepancy-index (reverse needle) (reverse relevant-haystack) 0)]
(if reversed-index
(- (count needle) (inc reversed-index))
false)))
(defn- calculate-jump
[needle haystack index]
(let [discrep (reverse-discrepancy-index needle haystack index)
good-suff (good-suffix needle)
last-occ (last-occurrence needle)]
(- discrep (min (good-suff discrep)
(last-occ (nth haystack (+ index discrep)))))))
(defn- color-array [index discrep needle]
(concat (map (fn [i] { :color :green
:index i })
(reverse (range (if discrep (inc discrep) 0) (count needle))))
(if discrep [{:index discrep :color :red}] [])))
(defn- explanation-for
[needle haystack index]
(let [discrep (reverse-discrepancy-index needle haystack index)
good-suff (good-suffix needle)
good-suff-value (good-suff discrep)
last-occ (last-occurrence needle)
haystack-char (nth haystack (+ index discrep))
last-occ-value (last-occ haystack-char)]
(cond
(nil? haystack-char) "No match found :("
discrep (str
"discrepancy_index = " discrep "<br>"
"last_occurrence(" haystack-char ") = " last-occ-value "<br>"
"good_suffix(" discrep ") = " good-suff-value
"<br><br>"
"Good Suffix gives a jump of (" discrep ") - (" good-suff-value ") = " (- discrep good-suff-value) "<br>"
"Last Occurrence gives a jump of (" discrep ") - (" last-occ-value") = " (- discrep last-occ-value) "<br>"
"So we "
(cond (< good-suff-value last-occ-value) "go with Good Suffix"
(> good-suff-value last-occ-value) "go with Last Occurrence"
:else "are indifferent"))
:else "Match found!")))
(defn- match-data
[needle haystack]
(loop [index 0
acc []]
(let [discrep (reverse-discrepancy-index needle haystack index)
jump (calculate-jump needle haystack index)
colors (color-array index discrep needle)
explanation (explanation-for needle haystack index)
entry {:index index
:colors colors
:explanation explanation}
result (conj acc entry)]
(if (and discrep
(<= (+ index (count needle)) (count haystack)))
(recur (+ index jump) result)
result))))
(defn match
[needle haystack]
(let [good-suff (good-suffix needle)
last-occ (last-occurrence needle)]
{:animation (match-data needle haystack)
:tables [(concat [["i" "Suffix Location"]] (map vector (range) good-suff))
(concat [["char" "Last Occurrence"]]
(map #(vector % (last-occ %)) (distinct needle)))]}))
| null | https://raw.githubusercontent.com/justinj/string-matching-visualization/9aee3139d087860f4235b78b9a5dfc71df7a4e67/src/cljs/strmatch/logic/boyer_moore.cljs | clojure | In any situation, it checks what jump either table would allow, and takes the better one.
The last-occurrence checker takes a character and returns the last occurence
of that character in the needle, or -1 if it does not occur.
This is used with the haystack to push the needle past all the points where the current
haystack character could not possibly occur.
Example:
needle: jey
haystack: abjjey
-> abjjey
-> jey
We don't get one. Then we find the "last occurrence" of 'j' in our needle, and align that with the 'j':
-> abjjey
-> jey
The same process repeats with the 'e', we align the 'e' with the last occurence:
-> abjjey
-> jey
And find the match.
The suffix checker is constructed as follows:
For index `i`, take `s`, the substring beginning at index `i`.
We then find the farthest right alignment of `s` with the needle,
In this case, the alignment is:
-> NEEDLE
-> LE
You could think of it as finding the largest index that [^L]E matches. | (ns strmatch.logic.boyer-moore
(:use [strmatch.logic.common :only [discrepancy-index]]))
The Boyer - Moore algorithm has a couple differences from KMP .
First , it begins trying to match from the right side , rather than the left .
Second , it uses two tables to calculate its jumps ( last occurrence and suffix ) , rather than just one .
It happens that the two tables tend to complement each other quite well , so large jumps are often possible .
First , we check the value at i = 2 for a match .
(defn last-occurrence
[string]
(fn [chr]
(.lastIndexOf string chr)))
(defn- matches-at
[string suffix index]
(let [chrs (vec string)]
(and
(or (neg? index)
(not (= (chrs index) (suffix 0))))
(every? (fn [i]
(or (neg? (+ i index))
(= (chrs (+ i index)) (suffix i))))
(range 1 (count suffix))))))
(defn- match-index
[string suffix]
(second
(first
(filter first
(map #(vector (matches-at string (vec suffix) %) %)
(range (- (count string) (count suffix)) (- -1 (count suffix)) -1))))))
(defn- good-suffix-for-index
[string index]
(let [suffix (drop index string)]
(match-index string suffix)))
so if our string is NEEDLE , and i = 4 , we take ` s ` = " LE " .
such that the first character * does not * match , and the rest of ` s ` does .
(defn good-suffix
[string]
(vec (map
#(good-suffix-for-index string %)
(range 0 (count string)))))
(defn- reverse-discrepancy-index
[needle haystack index]
(let [relevant-haystack (take (count needle) (drop index haystack))
reversed-index (discrepancy-index (reverse needle) (reverse relevant-haystack) 0)]
(if reversed-index
(- (count needle) (inc reversed-index))
false)))
(defn- calculate-jump
[needle haystack index]
(let [discrep (reverse-discrepancy-index needle haystack index)
good-suff (good-suffix needle)
last-occ (last-occurrence needle)]
(- discrep (min (good-suff discrep)
(last-occ (nth haystack (+ index discrep)))))))
(defn- color-array [index discrep needle]
(concat (map (fn [i] { :color :green
:index i })
(reverse (range (if discrep (inc discrep) 0) (count needle))))
(if discrep [{:index discrep :color :red}] [])))
(defn- explanation-for
[needle haystack index]
(let [discrep (reverse-discrepancy-index needle haystack index)
good-suff (good-suffix needle)
good-suff-value (good-suff discrep)
last-occ (last-occurrence needle)
haystack-char (nth haystack (+ index discrep))
last-occ-value (last-occ haystack-char)]
(cond
(nil? haystack-char) "No match found :("
discrep (str
"discrepancy_index = " discrep "<br>"
"last_occurrence(" haystack-char ") = " last-occ-value "<br>"
"good_suffix(" discrep ") = " good-suff-value
"<br><br>"
"Good Suffix gives a jump of (" discrep ") - (" good-suff-value ") = " (- discrep good-suff-value) "<br>"
"Last Occurrence gives a jump of (" discrep ") - (" last-occ-value") = " (- discrep last-occ-value) "<br>"
"So we "
(cond (< good-suff-value last-occ-value) "go with Good Suffix"
(> good-suff-value last-occ-value) "go with Last Occurrence"
:else "are indifferent"))
:else "Match found!")))
(defn- match-data
[needle haystack]
(loop [index 0
acc []]
(let [discrep (reverse-discrepancy-index needle haystack index)
jump (calculate-jump needle haystack index)
colors (color-array index discrep needle)
explanation (explanation-for needle haystack index)
entry {:index index
:colors colors
:explanation explanation}
result (conj acc entry)]
(if (and discrep
(<= (+ index (count needle)) (count haystack)))
(recur (+ index jump) result)
result))))
(defn match
[needle haystack]
(let [good-suff (good-suffix needle)
last-occ (last-occurrence needle)]
{:animation (match-data needle haystack)
:tables [(concat [["i" "Suffix Location"]] (map vector (range) good-suff))
(concat [["char" "Last Occurrence"]]
(map #(vector % (last-occ %)) (distinct needle)))]}))
|
f577b1471e43d941eb051514f5539edddfa6e4e198ef506d5eb1cf11a9b13e24 | processone/zlib | ezlib_sup.erl | %%%----------------------------------------------------------------------
%%% File : ezlib_sup.erl
Author : < >
Purpose : supervisor
Created : 4 Apr 2013 by < >
%%%
%%%
ezlib , Copyright ( C ) 2002 - 2015 ProcessOne
%%%
%%% This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation ; either version 2 of the
%%% License, or (at your option) any later version.
%%%
%%% This program is distributed in the hope that it will be useful,
%%% but WITHOUT ANY WARRANTY; without even the implied warranty of
%%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
%%% General Public License for more details.
%%%
You should have received a copy of the GNU General Public License
%%% along with this program; if not, write to the Free Software
Foundation , Inc. , 59 Temple Place , Suite 330 , Boston , MA
02111 - 1307 USA
%%%
%%%----------------------------------------------------------------------
-module(ezlib_sup).
-behaviour(supervisor).
%% API
-export([start_link/0]).
%% Supervisor callbacks
-export([init/1]).
-define(SERVER, ?MODULE).
%%%===================================================================
%%% API functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Starts and links the ezlib top-level supervisor, registering it
%% locally under the module name.
%%
%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
start_link() ->
    %% Register under the module name; ?SERVER expands to ?MODULE anyway.
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%%%===================================================================
%%% Supervisor callbacks
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Supervisor callback. Builds the restart strategy and the single
%% child specification for the ezlib worker.
%%
%% Strategy: one_for_one, allowing at most 10 restarts within 1 second.
%% Child: the ezlib worker, permanent, killed brutally on shutdown.
%%
%% @spec init(Args) -> {ok, {SupFlags, [ChildSpec]}} |
%%                     ignore |
%%                     {error, Reason}
%% @end
%%--------------------------------------------------------------------
init([]) ->
    %% Restart strategy: one_for_one, max 10 restarts per 1 second.
    SupFlags = {one_for_one, 10, 1},
    %% Single permanent worker running ezlib:start_link/0.
    ChildSpec = {ezlib,
                 {ezlib, start_link, []},
                 permanent,
                 brutal_kill,
                 worker,
                 [ezlib]},
    {ok, {SupFlags, [ChildSpec]}}.
%%%===================================================================
Internal functions
%%%===================================================================
| null | https://raw.githubusercontent.com/processone/zlib/3826aca25b6e3e576ec0d099d48e5bfc1afbb48b/src/ezlib_sup.erl | erlang | ----------------------------------------------------------------------
File : ezlib_sup.erl
This program is free software; you can redistribute it and/or
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
along with this program; if not, write to the Free Software
----------------------------------------------------------------------
API
Supervisor callbacks
===================================================================
API functions
===================================================================
--------------------------------------------------------------------
@doc
Starts the supervisor
@end
--------------------------------------------------------------------
===================================================================
Supervisor callbacks
===================================================================
--------------------------------------------------------------------
@doc
Whenever a supervisor is started using supervisor:start_link/[2,3],
this function is called by the new process to find out about
restart strategy, maximum restart frequency and child
specifications.
ignore |
{error, Reason}
@end
--------------------------------------------------------------------
===================================================================
=================================================================== | Author : < >
Purpose : supervisor
Created : 4 Apr 2013 by < >
ezlib , Copyright ( C ) 2002 - 2015 ProcessOne
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation ; either version 2 of the
You should have received a copy of the GNU General Public License
Foundation , Inc. , 59 Temple Place , Suite 330 , Boston , MA
02111 - 1307 USA
-module(ezlib_sup).
-behaviour(supervisor).
-export([start_link/0]).
-export([init/1]).
-define(SERVER, ?MODULE).
( ) - > { ok , Pid } | ignore | { error , Error }
start_link() ->
supervisor:start_link({local, ?SERVER}, ?MODULE, []).
@private
) - > { ok , { SupFlags , [ ChildSpec ] } } |
init([]) ->
EZlib = {ezlib, {ezlib, start_link, []},
permanent, brutal_kill, worker, [ezlib]},
{ok, {{one_for_one, 10, 1}, [EZlib]}}.
Internal functions
|
8144a01537c2cb6097ffc18459153ef27576ba2bb76a8310641d603ff4716374 | ghc/packages-dph | USSegd.hs | # LANGUAGE CPP #
{-# OPTIONS -Wall -fno-warn-orphans #-}
#include "fusion-phases.h"
-- | Scattered Segment Descriptors.
--
-- See "Data.Array.Parallel.Unlifted" for how this works.
--
module Data.Array.Parallel.Unlifted.Sequential.USSegd
( -- * Types
USSegd(..)
, valid
-- * Constructors
, mkUSSegd
, empty
, singleton
, fromUSegd
-- * Predicates
, isContiguous
-- * Projections
, length
, takeUSegd, takeLengths, takeIndices, takeElements
, takeSources, takeStarts
, getSeg
-- * Operators
, appendWith
, cullOnVSegids)
where
import Data.Array.Parallel.Unlifted.Sequential.USegd (USegd)
import Data.Array.Parallel.Unlifted.Sequential.Vector (Vector)
import Data.Array.Parallel.Pretty hiding (empty)
import Prelude hiding (length)
import qualified Data.Array.Parallel.Unlifted.Sequential.USegd as USegd
import qualified Data.Array.Parallel.Unlifted.Sequential.Vector as U
import Debug.Trace
here :: String -> String
here s = "Data.Array.Parallel.Unlifted.Sequential.USSegd." ++ s
-- USSegd ---------------------------------------------------------------------
-- | Scattered Segment Descriptor.
data USSegd
= USSegd
{ ussegd_contiguous :: !Bool
-- ^ True when the starts are identical to the usegd indices field
-- and the sources are all 0's.
--
In this case all the data elements are in one contiguous flat
-- array, and consumers can avoid looking at the real starts and
-- sources fields.
, ussegd_starts :: Vector Int
-- ^ Starting index of each segment in its flat array.
--
-- IMPORTANT: this field is lazy so we can avoid creating it when
-- the flat array is contiguous.
, ussegd_sources :: Vector Int
-- ^ Which flat array to take each segment from.
--
-- IMPORTANT: this field is lazy so we can avoid creating it when
-- the flat array is contiguous.
, ussegd_usegd :: !USegd
-- ^ Segment descriptor relative to a contiguous index space.
-- This defines the length of each segment.
}
deriving (Show)
| Pretty print the physical representation of a ` UVSegd `
instance PprPhysical USSegd where
pprp (USSegd _ starts sources ssegd)
= vcat
[ text "USSegd"
$$ (nest 7 $ vcat
[ text "starts: " <+> (text $ show $ U.toList starts)
, text "sources: " <+> (text $ show $ U.toList sources) ])
, pprp ssegd ]
-- Constructors ---------------------------------------------------------------
-- | O(1). Construct a new scattered segment descriptor.
-- All the provided arrays must have the same lengths.
mkUSSegd
:: Vector Int -- ^ Starting index of each segment in its flat array.
-> Vector Int -- ^ Which array to take each segment from.
-> USegd -- ^ Contiguous segment descriptor.
-> USSegd
mkUSSegd = USSegd False
# INLINE mkUSSegd #
-- | O(1). Check the internal consistency of a scattered segment descriptor.
valid :: USSegd -> Bool
valid (USSegd _ starts srcids usegd)
= (U.length starts == USegd.length usegd)
&& (U.length srcids == USegd.length usegd)
# NOINLINE valid #
NOINLINE because it 's only enabled during debugging anyway .
-- | O(1). Construct an empty segment descriptor, with no elements or segments.
empty :: USSegd
empty = USSegd True U.empty U.empty USegd.empty
{-# INLINE_U empty #-}
-- | O(1). Construct a singleton segment descriptor.
-- The single segment covers the given number of elements in a flat array
with sourceid 0 .
singleton :: Int -> USSegd
singleton n
= USSegd True (U.singleton 0) (U.singleton 0) (USegd.singleton n)
# INLINE_U singleton #
| O(segs ) . Promote a plain ` USegd ` to a ` USSegd ` .
All segments are assumed to come from a flat array with sourceid 0 .
fromUSegd :: USegd -> USSegd
fromUSegd usegd
= USSegd True
(USegd.takeIndices usegd)
(U.replicate (USegd.length usegd) 0)
usegd
{-# INLINE_U fromUSegd #-}
-- Predicates -----------------------------------------------------------------
-- INLINE trivial projections as they'll expand to a single record selector.
-- | O(1). True when the starts are identical to the usegd indices field and
-- the sources are all 0's.
--
In this case all the data elements are in one contiguous flat
-- array, and consumers can avoid looking at the real starts and
-- sources fields.
--
isContiguous :: USSegd -> Bool
isContiguous = ussegd_contiguous
# INLINE isContiguous #
-- Projections ----------------------------------------------------------------
-- INLINE trivial projections as they'll expand to a single record selector.
-- | O(1). Yield the overall number of segments.
length :: USSegd -> Int
length = USegd.length . ussegd_usegd
# INLINE length #
| O(1 ) . Yield the ` USegd ` of a ` USSegd ` .
takeUSegd :: USSegd -> USegd
takeUSegd = ussegd_usegd
# INLINE takeUSegd #
| O(1 ) . Yield the lengths of the segments of a ` USSegd ` .
takeLengths :: USSegd -> Vector Int
takeLengths = USegd.takeLengths . ussegd_usegd
# INLINE takeLengths #
| O(1 ) . Yield the segment indices of a ` USSegd ` .
takeIndices :: USSegd -> Vector Int
takeIndices = USegd.takeIndices . ussegd_usegd
# INLINE takeIndices #
| O(1 ) . Yield the total number of elements covered by a ` USSegd ` .
takeElements :: USSegd -> Int
takeElements = USegd.takeElements . ussegd_usegd
# INLINE takeElements #
| O(1 ) . Yield the starting indices of a ` USSegd ` .
takeStarts :: USSegd -> Vector Int
takeStarts = ussegd_starts
# INLINE takeStarts #
| O(1 ) . Yield the source ids of a ` USSegd ` .
takeSources :: USSegd -> Vector Int
takeSources = ussegd_sources
# INLINE takeSources #
-- | O(1). Get the length, segment index, starting index, and source id of a segment.
getSeg :: USSegd -> Int -> (Int, Int, Int, Int)
getSeg (USSegd _ starts sources usegd) ix
= let (len, ixl) = USegd.getSeg usegd ix
in ( len
, ixl
, U.index (here "getSeg") starts ix
, U.index (here "getSeg") sources ix)
{-# INLINE_U getSeg #-}
Operators = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
-- | O(n). Produce a segment descriptor that describes the result of appending
two arrays .
appendWith
^ Segment descriptor of first nested array .
^ Number of flat data arrays used to represent first nested array .
^ Segment descriptor of second nested array .
^ Number of flat data arrays used to represent second nested array .
-> USSegd
appendWith
(USSegd _ starts1 srcs1 usegd1) pdatas1
(USSegd _ starts2 srcs2 usegd2) _
= traceEvent
( "dph-prim-seq: USSegd.appendWith."
++ " length(result) = " ++ show (U.length starts1 + U.length starts2))
$ USSegd False
(starts1 U.++ starts2)
(srcs1 U.++ U.map (+ pdatas1) srcs2)
(USegd.append usegd1 usegd2)
# NOINLINE appendWith #
NOINLINE because we 're worried about code explosion . Might be useful though .
| Cull the segments of a ` USSegd ` down to only those reachable from an array
-- of @vsegids@, and also update the @vsegids@ to point to the same segments
-- in the result.
--
cullOnVSegids :: Vector Int -> USSegd -> (Vector Int, USSegd)
cullOnVSegids vsegids (USSegd _ starts sources usegd)
= {-# SCC "cullOnVSegids" #-}
traceEvent
( "dph-prim-seq: USSegd.cullOnVSegids."
++ " length(segmap) = " ++ show (U.length vsegids))
$ let -- Determine which of the psegs are still reachable from the vsegs.
-- This produces an array of flags,
with reachable psegs corresponding to 1
and unreachable psegs corresponding to 0
--
-- eg vsegids: [0 1 1 3 5 5 6 6]
= > psegids_used : [ 1 1 0 1 0 1 1 ]
--
-- Note that psegids '2' and '4' are not in vsegids_packed.
!psegids_used
= U.bpermuteDft (USegd.length usegd)
(const False)
(U.zip vsegids (U.replicate (U.length vsegids) True))
-- Produce an array of used psegs.
eg psegids_used : [ 1 1 0 1 0 1 1 ]
-- psegids_packed: [0 1 3 5 6]
!psegids_packed
= U.pack (U.enumFromTo 0 (U.length psegids_used)) psegids_used
-- Produce an array that maps psegids in the source array onto
-- psegids in the result array. If a particular pseg isn't present
-- in the result this maps onto -1.
-- Note that if psegids_used has 0 in some position, then psegids_map
-- has -1 in the same position, corresponding to an unused pseg.
-- eg psegids_packed: [0 1 3 5 6]
-- [0 1 2 3 4]
psegids_map : [ 0 1 -1 2 -1 3 4 ]
!psegids_map
= U.bpermuteDft (USegd.length usegd)
(const (-1))
(U.zip psegids_packed (U.enumFromTo 0 (U.length psegids_packed - 1)))
-- Use the psegids_map to rewrite the packed vsegids to point to the
-- corresponding psegs in the result.
--
-- eg vsegids: [0 1 1 3 5 5 6 6]
psegids_map : [ 0 1 -1 2 -1 3 4 ]
--
-- vsegids': [0 1 1 2 3 3 4 4]
--
!vsegids' = U.map (U.index (here "cullOnVSegids") psegids_map) vsegids
-- Rebuild the usegd.
!starts' = U.pack starts psegids_used
!sources' = U.pack sources psegids_used
!lengths' = U.pack (USegd.takeLengths usegd) psegids_used
!usegd' = USegd.fromLengths lengths'
!ussegd' = USSegd False starts' sources' usegd'
in (vsegids', ussegd')
# NOINLINE cullOnVSegids #
NOINLINE because it 's complicated and wo n't fuse with anything
This can also be expensive and we want to see the SCC in profiling builds .
| null | https://raw.githubusercontent.com/ghc/packages-dph/64eca669f13f4d216af9024474a3fc73ce101793/dph-prim-seq/Data/Array/Parallel/Unlifted/Sequential/USSegd.hs | haskell | # OPTIONS -Wall -fno-warn-orphans #
| Scattered Segment Descriptors.
See "Data.Array.Parallel.Unlifted" for how this works.
* Types
* Constructors
* Predicates
* Projections
* Operators
USSegd ---------------------------------------------------------------------
| Scattered Segment Descriptor.
^ True when the starts are identical to the usegd indices field
and the sources are all 0's.
array, and consumers can avoid looking at the real starts and
sources fields.
^ Starting index of each segment in its flat array.
IMPORTANT: this field is lazy so we can avoid creating it when
the flat array is contiguous.
^ Which flat array to take each segment from.
IMPORTANT: this field is lazy so we can avoid creating it when
the flat array is contiguous.
^ Segment descriptor relative to a contiguous index space.
This defines the length of each segment.
Constructors ---------------------------------------------------------------
| O(1). Construct a new scattered segment descriptor.
All the provided arrays must have the same lengths.
^ Starting index of each segment in its flat array.
^ Which array to take each segment from.
^ Contiguous segment descriptor.
| O(1). Check the internal consistency of a scattered segment descriptor.
| O(1). Construct an empty segment descriptor, with no elements or segments.
# INLINE_U empty #
| O(1). Construct a singleton segment descriptor.
The single segment covers the given number of elements in a flat array
# INLINE_U fromUSegd #
Predicates -----------------------------------------------------------------
INLINE trivial projections as they'll expand to a single record selector.
| O(1). True when the starts are identical to the usegd indices field and
the sources are all 0's.
array, and consumers can avoid looking at the real starts and
sources fields.
Projections ----------------------------------------------------------------
INLINE trivial projections as they'll expand to a single record selector.
| O(1). Yield the overall number of segments.
| O(1). Get the length, segment index, starting index, and source id of a segment.
# INLINE_U getSeg #
| O(n). Produce a segment descriptor that describes the result of appending
of @vsegids@, and also update the @vsegids@ to point to the same segments
in the result.
# SCC "cullOnVSegids" #
Determine which of the psegs are still reachable from the vsegs.
This produces an array of flags,
eg vsegids: [0 1 1 3 5 5 6 6]
Note that psegids '2' and '4' are not in vsegids_packed.
Produce an array of used psegs.
psegids_packed: [0 1 3 5 6]
Produce an array that maps psegids in the source array onto
psegids in the result array. If a particular pseg isn't present
in the result this maps onto -1.
Note that if psegids_used has 0 in some position, then psegids_map
has -1 in the same position, corresponding to an unused pseg.
eg psegids_packed: [0 1 3 5 6]
[0 1 2 3 4]
Use the psegids_map to rewrite the packed vsegids to point to the
corresponding psegs in the result.
eg vsegids: [0 1 1 3 5 5 6 6]
vsegids': [0 1 1 2 3 3 4 4]
Rebuild the usegd. | # LANGUAGE CPP #
#include "fusion-phases.h"
module Data.Array.Parallel.Unlifted.Sequential.USSegd
USSegd(..)
, valid
, mkUSSegd
, empty
, singleton
, fromUSegd
, isContiguous
, length
, takeUSegd, takeLengths, takeIndices, takeElements
, takeSources, takeStarts
, getSeg
, appendWith
, cullOnVSegids)
where
import Data.Array.Parallel.Unlifted.Sequential.USegd (USegd)
import Data.Array.Parallel.Unlifted.Sequential.Vector (Vector)
import Data.Array.Parallel.Pretty hiding (empty)
import Prelude hiding (length)
import qualified Data.Array.Parallel.Unlifted.Sequential.USegd as USegd
import qualified Data.Array.Parallel.Unlifted.Sequential.Vector as U
import Debug.Trace
here :: String -> String
here s = "Data.Array.Parallel.Unlifted.Sequential.USSegd." ++ s
data USSegd
= USSegd
{ ussegd_contiguous :: !Bool
In this case all the data elements are in one contiguous flat
, ussegd_starts :: Vector Int
, ussegd_sources :: Vector Int
, ussegd_usegd :: !USegd
}
deriving (Show)
| Pretty print the physical representation of a ` UVSegd `
instance PprPhysical USSegd where
pprp (USSegd _ starts sources ssegd)
= vcat
[ text "USSegd"
$$ (nest 7 $ vcat
[ text "starts: " <+> (text $ show $ U.toList starts)
, text "sources: " <+> (text $ show $ U.toList sources) ])
, pprp ssegd ]
mkUSSegd
-> USSegd
mkUSSegd = USSegd False
# INLINE mkUSSegd #
valid :: USSegd -> Bool
valid (USSegd _ starts srcids usegd)
= (U.length starts == USegd.length usegd)
&& (U.length srcids == USegd.length usegd)
# NOINLINE valid #
NOINLINE because it 's only enabled during debugging anyway .
empty :: USSegd
empty = USSegd True U.empty U.empty USegd.empty
with sourceid 0 .
singleton :: Int -> USSegd
singleton n
= USSegd True (U.singleton 0) (U.singleton 0) (USegd.singleton n)
# INLINE_U singleton #
| O(segs ) . Promote a plain ` USegd ` to a ` USSegd ` .
All segments are assumed to come from a flat array with sourceid 0 .
fromUSegd :: USegd -> USSegd
fromUSegd usegd
= USSegd True
(USegd.takeIndices usegd)
(U.replicate (USegd.length usegd) 0)
usegd
In this case all the data elements are in one contiguous flat
isContiguous :: USSegd -> Bool
isContiguous = ussegd_contiguous
# INLINE isContiguous #
length :: USSegd -> Int
length = USegd.length . ussegd_usegd
# INLINE length #
| O(1 ) . Yield the ` USegd ` of a ` USSegd ` .
takeUSegd :: USSegd -> USegd
takeUSegd = ussegd_usegd
# INLINE takeUSegd #
| O(1 ) . Yield the lengths of the segments of a ` USSegd ` .
takeLengths :: USSegd -> Vector Int
takeLengths = USegd.takeLengths . ussegd_usegd
# INLINE takeLengths #
| O(1 ) . Yield the segment indices of a ` USSegd ` .
takeIndices :: USSegd -> Vector Int
takeIndices = USegd.takeIndices . ussegd_usegd
# INLINE takeIndices #
| O(1 ) . Yield the total number of elements covered by a ` USSegd ` .
takeElements :: USSegd -> Int
takeElements = USegd.takeElements . ussegd_usegd
# INLINE takeElements #
| O(1 ) . Yield the starting indices of a ` USSegd ` .
takeStarts :: USSegd -> Vector Int
takeStarts = ussegd_starts
# INLINE takeStarts #
| O(1 ) . Yield the source ids of a ` USSegd ` .
takeSources :: USSegd -> Vector Int
takeSources = ussegd_sources
# INLINE takeSources #
getSeg :: USSegd -> Int -> (Int, Int, Int, Int)
getSeg (USSegd _ starts sources usegd) ix
= let (len, ixl) = USegd.getSeg usegd ix
in ( len
, ixl
, U.index (here "getSeg") starts ix
, U.index (here "getSeg") sources ix)
Operators = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
two arrays .
appendWith
^ Segment descriptor of first nested array .
^ Number of flat data arrays used to represent first nested array .
^ Segment descriptor of second nested array .
^ Number of flat data arrays used to represent second nested array .
-> USSegd
appendWith
(USSegd _ starts1 srcs1 usegd1) pdatas1
(USSegd _ starts2 srcs2 usegd2) _
= traceEvent
( "dph-prim-seq: USSegd.appendWith."
++ " length(result) = " ++ show (U.length starts1 + U.length starts2))
$ USSegd False
(starts1 U.++ starts2)
(srcs1 U.++ U.map (+ pdatas1) srcs2)
(USegd.append usegd1 usegd2)
# NOINLINE appendWith #
NOINLINE because we 're worried about code explosion . Might be useful though .
| Cull the segments of a ` USSegd ` down to only those reachable from an array
cullOnVSegids :: Vector Int -> USSegd -> (Vector Int, USSegd)
cullOnVSegids vsegids (USSegd _ starts sources usegd)
traceEvent
( "dph-prim-seq: USSegd.cullOnVSegids."
++ " length(segmap) = " ++ show (U.length vsegids))
with reachable psegs corresponding to 1
and unreachable psegs corresponding to 0
= > psegids_used : [ 1 1 0 1 0 1 1 ]
!psegids_used
= U.bpermuteDft (USegd.length usegd)
(const False)
(U.zip vsegids (U.replicate (U.length vsegids) True))
eg psegids_used : [ 1 1 0 1 0 1 1 ]
!psegids_packed
= U.pack (U.enumFromTo 0 (U.length psegids_used)) psegids_used
psegids_map : [ 0 1 -1 2 -1 3 4 ]
!psegids_map
= U.bpermuteDft (USegd.length usegd)
(const (-1))
(U.zip psegids_packed (U.enumFromTo 0 (U.length psegids_packed - 1)))
psegids_map : [ 0 1 -1 2 -1 3 4 ]
!vsegids' = U.map (U.index (here "cullOnVSegids") psegids_map) vsegids
!starts' = U.pack starts psegids_used
!sources' = U.pack sources psegids_used
!lengths' = U.pack (USegd.takeLengths usegd) psegids_used
!usegd' = USegd.fromLengths lengths'
!ussegd' = USSegd False starts' sources' usegd'
in (vsegids', ussegd')
# NOINLINE cullOnVSegids #
NOINLINE because it 's complicated and wo n't fuse with anything
This can also be expensive and we want to see the SCC in profiling builds .
|
c28ad9492bb0a47d88c9b270fd3c562284eda639934fe71d92372870443860fc | fizruk/miso-aframe | Animation.hs | # LANGUAGE DeriveGeneric #
# LANGUAGE GeneralizedNewtypeDeriving #
{-# LANGUAGE OverloadedStrings #-}
# LANGUAGE RecordWildCards #
module Miso.AFrame.Core.Animation where
import Data.Aeson
import Miso
import Miso.String
import GHCJS.Marshal (ToJSVal(..))
import GHC.Generics (Generic)
import Miso.AFrame.Core.Types
import Miso.AFrame.Core.Internal.Utils
data AnimationRepeatCount
= Finite Int
| Indefinite
deriving (Generic)
-- | Determines effect of animation when not actively in play.
data AnimationFill
= AnimationBackwards
| AnimationBoth
| AnimationForwards
| AnimationNone
deriving (Generic)
data AnimationBasicEasing
= Ease
| EaseIn
| EaseOut
| EaseInOut
deriving (Generic)
data AnimationEasingGroup
= Cubic
| Quad
| Quart
| Quint
| Sine
| Expo
| Circ
| Elastic
| Back
| Bounce
deriving (Generic)
data AnimationEasing = AnimationEasing
{ animationEasingBasic :: AnimationBasicEasing
, animationEasingGroup :: Maybe AnimationEasingGroup
} deriving (Generic)
data AnimationDirection
= AnimationAlternate
| AnimationAlternateReverse
| AnimationNormal
| AnimationReverse
deriving (Show, Generic)
-- | Animation attributes.
data AnimationAttrs = AnimationAttrs
{ -- | Event name to wait on before beginning animation.
animationBegin :: Maybe MisoString
, -- | Event name to wait on before stopping animation.
animationEnd :: Maybe MisoString
, -- | Delay (in milliseconds) or event name to wait on before beginning animation.
animationDelay :: Maybe Milliseconds
, -- | Direction of the animation (between 'animationFrom' and 'animationTo').
animationDirection :: Maybe AnimationDirection
, -- | Duration in (milliseconds) of the animation.
animationDur :: Maybe Milliseconds
, -- | Easing function of the animation. There are very many to choose from.
animationEasing :: Maybe AnimationEasing
, -- | Determines effect of animation when not actively in play.
animationFill :: Maybe AnimationFill
, -- | Repeat count or 'Indefinite'.
animationRepeat :: AnimationRepeatCount
} deriving (Generic)
defaultAnimationAttrs :: AnimationAttrs
defaultAnimationAttrs = AnimationAttrs
{ animationBegin = Nothing
, animationEnd = Nothing
, animationDelay = Nothing
, animationDirection = Nothing
, animationDur = Nothing
, animationEasing = Nothing
, animationFill = Nothing
, animationRepeat = Finite 0
}
class ToJSVal a => CanAnimate a
instance CanAnimate Vec3
instance CanAnimate Bool
instance CanAnimate Float
instance CanAnimate Color
-- | A-Frame animation entity.
animation
:: CanAnimate a
=> MisoString -- ^ Name of an attribute to provide animation for.
-> Maybe a -- ^ Starting value. Specify 'Nothing' to use current value.
-> a -- ^ Ending value.
-> AnimationAttrs -- ^ Animation attributes.
-> View action
animation attrName mfrom to animAttrs = node_ "a-animation" attrs []
where
attrs
= prop "attribute" attrName
: prop "to" to
: attrsFromJSON animAttrs
++ fromProp
fromProp = case mfrom of
Nothing -> []
Just from -> [prop "from" from]
instance ToJSON AnimationFill where toJSON = gtoJSON
instance ToJSON AnimationBasicEasing where toJSON = gtoJSON
instance ToJSON AnimationEasingGroup where toJSON = gtoJSON
instance ToJSON AnimationDirection where toJSON = gtoJSON
instance ToJSON AnimationAttrs where toJSON = gtoJSON
instance ToJSON AnimationRepeatCount where
toJSON (Finite n) = toJSON n
toJSON Indefinite = "indefinite"
instance ToJSON AnimationEasing where
toJSON AnimationEasing{..} =
case (toJSON <$> animationEasingGroup, toJSON animationEasingBasic) of
(Just (String g), String b) -> String (g <> b)
(Nothing, b) -> b
_ -> Null
| null | https://raw.githubusercontent.com/fizruk/miso-aframe/eb608e86fd71f01b397cde04124ff3a949e843be/src/Miso/AFrame/Core/Animation.hs | haskell | # LANGUAGE OverloadedStrings #
| Determines effect of animation when not actively in play.
| Animation attributes.
| Event name to wait on before beginning animation.
| Event name to wait on before stopping animation.
| Delay (in milliseconds) or event name to wait on before beginning animation.
| Direction of the animation (between 'animationFrom' and 'animationTo').
| Duration in (milliseconds) of the animation.
| Easing function of the animation. There are very many to choose from.
| Determines effect of animation when not actively in play.
| Repeat count or 'Indefinite'.
| A-Frame animation entity.
^ Name of an attribute to provide animation for.
^ Starting value. Specify 'Nothing' to use current value.
^ Ending value.
^ Animation attributes. | # LANGUAGE DeriveGeneric #
# LANGUAGE GeneralizedNewtypeDeriving #
# LANGUAGE RecordWildCards #
module Miso.AFrame.Core.Animation where
import Data.Aeson
import Miso
import Miso.String
import GHCJS.Marshal (ToJSVal(..))
import GHC.Generics (Generic)
import Miso.AFrame.Core.Types
import Miso.AFrame.Core.Internal.Utils
data AnimationRepeatCount
= Finite Int
| Indefinite
deriving (Generic)
data AnimationFill
= AnimationBackwards
| AnimationBoth
| AnimationForwards
| AnimationNone
deriving (Generic)
data AnimationBasicEasing
= Ease
| EaseIn
| EaseOut
| EaseInOut
deriving (Generic)
data AnimationEasingGroup
= Cubic
| Quad
| Quart
| Quint
| Sine
| Expo
| Circ
| Elastic
| Back
| Bounce
deriving (Generic)
data AnimationEasing = AnimationEasing
{ animationEasingBasic :: AnimationBasicEasing
, animationEasingGroup :: Maybe AnimationEasingGroup
} deriving (Generic)
data AnimationDirection
= AnimationAlternate
| AnimationAlternateReverse
| AnimationNormal
| AnimationReverse
deriving (Show, Generic)
data AnimationAttrs = AnimationAttrs
animationBegin :: Maybe MisoString
animationEnd :: Maybe MisoString
animationDelay :: Maybe Milliseconds
animationDirection :: Maybe AnimationDirection
animationDur :: Maybe Milliseconds
animationEasing :: Maybe AnimationEasing
animationFill :: Maybe AnimationFill
animationRepeat :: AnimationRepeatCount
} deriving (Generic)
defaultAnimationAttrs :: AnimationAttrs
defaultAnimationAttrs = AnimationAttrs
{ animationBegin = Nothing
, animationEnd = Nothing
, animationDelay = Nothing
, animationDirection = Nothing
, animationDur = Nothing
, animationEasing = Nothing
, animationFill = Nothing
, animationRepeat = Finite 0
}
class ToJSVal a => CanAnimate a
instance CanAnimate Vec3
instance CanAnimate Bool
instance CanAnimate Float
instance CanAnimate Color
animation
:: CanAnimate a
-> View action
animation attrName mfrom to animAttrs = node_ "a-animation" attrs []
where
attrs
= prop "attribute" attrName
: prop "to" to
: attrsFromJSON animAttrs
++ fromProp
fromProp = case mfrom of
Nothing -> []
Just from -> [prop "from" from]
instance ToJSON AnimationFill where toJSON = gtoJSON
instance ToJSON AnimationBasicEasing where toJSON = gtoJSON
instance ToJSON AnimationEasingGroup where toJSON = gtoJSON
instance ToJSON AnimationDirection where toJSON = gtoJSON
instance ToJSON AnimationAttrs where toJSON = gtoJSON
instance ToJSON AnimationRepeatCount where
toJSON (Finite n) = toJSON n
toJSON Indefinite = "indefinite"
instance ToJSON AnimationEasing where
toJSON AnimationEasing{..} =
case (toJSON <$> animationEasingGroup, toJSON animationEasingBasic) of
(Just (String g), String b) -> String (g <> b)
(Nothing, b) -> b
_ -> Null
|
9de55f346b35fd25c1bfa16aa121779d74fd275c1b45499a8397ed69014702c7 | s-k/simplecs | tags.clj | (ns simplecs.tags
(:require [simplecs.core :as sc :refer (defcomponent defcomponentsystem)]))
(defn tag
"Instead of defining a component '(defcomponent foo [])' and
adding '(foo)' to an entity, you can just add '(tag :foo)'
to the entity. There MUST NOT exist a component with the
same name as the tag keyword."
[tag]
{:pre [(keyword? tag)]}
{:name tag})
(defcomponent tag-to-entity
"Converts a tag stored in a component into the entity containing
the tag. The system 'tag-to-entity-converter' has to be included
in the CES. '(tag-to-entity [:foo :bar])' reads the entry :bar
of the component 'foo'. This entry should contain a keyword
representing a tag. The keyword is then replaced with the
entity which contains this tag. The 'tag-to-entity' component
is removed after it was applied."
[& paths]
{:paths paths})
(defcomponentsystem tag-to-entity-converter :tag-to-entity
"Applies the 'tag-to-entity' component. It is adviced that
this system is the first in the list of systems."
[]
[ces entity component]
(let [converted-ces (reduce (fn [ces path]
(sc/update-entity ces
entity
path
#(first (sc/entities-with-component ces %))))
ces
(:paths component))]
(sc/remove-component converted-ces entity :tag-to-entity))) | null | https://raw.githubusercontent.com/s-k/simplecs/a2db6d53e2cc1170fc9328c9b5ce2ec70c30637a/src/simplecs/tags.clj | clojure | (ns simplecs.tags
(:require [simplecs.core :as sc :refer (defcomponent defcomponentsystem)]))
(defn tag
"Instead of defining a component '(defcomponent foo [])' and
adding '(foo)' to an entity, you can just add '(tag :foo)'
to the entity. There MUST NOT exist a component with the
same name as the tag keyword."
[tag]
{:pre [(keyword? tag)]}
{:name tag})
(defcomponent tag-to-entity
"Converts a tag stored in a component into the entity containing
the tag. The system 'tag-to-entity-converter' has to be included
in the CES. '(tag-to-entity [:foo :bar])' reads the entry :bar
of the component 'foo'. This entry should contain a keyword
representing a tag. The keyword is then replaced with the
entity which contains this tag. The 'tag-to-entity' component
is removed after it was applied."
[& paths]
{:paths paths})
(defcomponentsystem tag-to-entity-converter :tag-to-entity
"Applies the 'tag-to-entity' component. It is adviced that
this system is the first in the list of systems."
[]
[ces entity component]
(let [converted-ces (reduce (fn [ces path]
(sc/update-entity ces
entity
path
#(first (sc/entities-with-component ces %))))
ces
(:paths component))]
(sc/remove-component converted-ces entity :tag-to-entity))) | |
5589573f9fabb47d7b4e04ee65a61b898cb05b6ddacd0e330488309acfe0c344 | facebook/duckling | DA_XX.hs | Copyright ( c ) 2016 - present , Facebook , Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory .
-----------------------------------------------------------------
-- Auto-generated by regenClassifiers
--
-- DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@generated
-----------------------------------------------------------------
{-# LANGUAGE OverloadedStrings #-}
module Duckling.Ranking.Classifiers.DA_XX (classifiers) where
import Data.String
import Prelude
import qualified Data.HashMap.Strict as HashMap
import Duckling.Ranking.Types
classifiers :: Classifiers
classifiers
= HashMap.fromList
[("<time> timezone",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.6109179126442243,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -1.2809338454620642),
("hh:mm", -1.5040773967762742), ("hour", -1.9740810260220096),
("minute", -1.0185695809945732)],
n = 16},
koData =
ClassData{prior = -infinity, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [], n = 0}}),
("Thursday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.0910424533583156,
likelihoods = HashMap.fromList [("", 0.0)], n = 20},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("integer (numeric)",
Classifier{okData =
ClassData{prior = -0.7872986116830517,
unseen = -5.0689042022202315,
likelihoods = HashMap.fromList [("", 0.0)], n = 157},
koData =
ClassData{prior = -0.6071024542014105, unseen = -5.247024072160486,
likelihoods = HashMap.fromList [("", 0.0)], n = 188}}),
("the day before yesterday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("lunch",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<time> <part-of-day>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("dayhour", -0.9555114450274363),
("tomorrowevening", -1.8718021769015913),
("tomorrowlunch", -1.8718021769015913),
("yesterdayevening", -1.8718021769015913),
("Mondaymorning", -1.8718021769015913)],
n = 4},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("dd/mm",
Classifier{okData =
ClassData{prior = -0.40546510810816444,
unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8},
koData =
ClassData{prior = -1.0986122886681098, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [("", 0.0)], n = 4}}),
("today",
Classifier{okData =
ClassData{prior = -0.1823215567939546,
unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [("", 0.0)], n = 5},
koData =
ClassData{prior = -1.791759469228055, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("at <time-of-day>",
Classifier{okData =
ClassData{prior = -0.2674793651342615, unseen = -5.062595033026967,
likelihoods =
HashMap.fromList
[("<time> timezone", -3.264486336120253),
("time-of-day (latent)", -1.2495833155779883),
("relative minutes after|past <integer> (hour-of-day)",
-3.9576335166801986),
("hh:mm", -1.9652033519899923),
("<time-of-day> sharp", -3.6699514442284173),
("hour", -1.1850447944404172), ("minute", -1.688949975361834)],
n = 75},
koData =
ClassData{prior = -1.4494732627414222,
unseen = -3.9889840465642745,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.7922380832041762),
("hour", -0.7922380832041762)],
n = 23}}),
("absorption of , after named day",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("Wednesday", -2.3978952727983707),
("Saturday", -1.9924301646902063),
("Monday", -1.9924301646902063),
("Friday", -1.9924301646902063), ("day", -0.8938178760220964),
("Sunday", -2.3978952727983707)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [], n = 0}}),
("September",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("tonight",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("on <date>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.784189633918261,
likelihoods =
HashMap.fromList
[("Thursday", -1.6817585740137264),
("Saturday", -3.068052935133617),
("Monday", -3.068052935133617),
("the <day-of-month> (non ordinal)", -2.662587827025453),
("<day-of-month>(ordinal) <named-month>", -1.8152899666382492),
("day", -0.8167611365271219),
("the <day-of-month> (ordinal)", -3.068052935133617)],
n = 18},
koData =
ClassData{prior = -infinity, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [], n = 0}}),
("integer (0..19)",
Classifier{okData =
ClassData{prior = -0.13976194237515874,
unseen = -3.7376696182833684,
likelihoods = HashMap.fromList [("", 0.0)], n = 40},
koData =
ClassData{prior = -2.03688192726104, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6}}),
("between <time-of-day> and <time-of-day> (interval)",
Classifier{okData =
ClassData{prior = -0.2876820724517809,
unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.6094379124341003),
("minuteminute", -1.2039728043259361),
("hh:mmhh:mm", -1.2039728043259361),
("minutehour", -1.6094379124341003)],
n = 3},
koData =
ClassData{prior = -1.3862943611198906,
unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.0986122886681098),
("minutehour", -1.0986122886681098)],
n = 1}}),
("between <datetime> and <datetime> (interval)",
Classifier{okData =
ClassData{prior = -0.5108256237659907,
unseen = -2.4849066497880004,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.7047480922384253),
("minuteminute", -1.2992829841302609),
("hh:mmhh:mm", -1.2992829841302609),
("minutehour", -1.7047480922384253)],
n = 3},
koData =
ClassData{prior = -0.916290731874155, unseen = -2.3025850929940455,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.5040773967762742),
("minuteminute", -1.5040773967762742),
("minutehour", -1.5040773967762742),
("hh:mmintersect", -1.5040773967762742)],
n = 2}}),
("October",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.70805020110221,
likelihoods = HashMap.fromList [("", 0.0)], n = 13},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("month (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<integer> more <unit-of-duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("integer (numeric)minute (grain)", -1.252762968495368),
("integer (0..19)minute (grain)", -1.252762968495368),
("minute", -0.8472978603872037)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("<time-of-day> o'clock",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.6931471805599453),
("hour", -0.6931471805599453)],
n = 1},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("Wednesday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.5649493574615367,
likelihoods = HashMap.fromList [("", 0.0)], n = 11},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("July",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("hour (grain)",
Classifier{okData =
ClassData{prior = -0.10536051565782628,
unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -2.3025850929940455,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("<ordinal> quarter",
Classifier{okData =
ClassData{prior = -0.6931471805599453,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinal (digits)quarter (grain)", -1.252762968495368),
("quarter", -0.8472978603872037),
("ordinals (first..19st)quarter (grain)", -1.252762968495368)],
n = 2},
koData =
ClassData{prior = -0.6931471805599453,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinal (digits)quarter (grain)", -1.252762968495368),
("quarter", -0.8472978603872037),
("ordinals (first..19st)quarter (grain)", -1.252762968495368)],
n = 2}}),
("intersect",
Classifier{okData =
ClassData{prior = -0.3657247794586503, unseen = -5.955837369464831,
likelihoods =
HashMap.fromList
[("<datetime> - <datetime> (interval)on <date>",
-4.161483865059729),
("Wednesday<named-month> <day-of-month> (non ordinal)",
-5.260096153727839),
("<time-of-day> - <time-of-day> (interval)on <date>",
-4.161483865059729),
("hourday", -5.260096153727839),
("dayhour", -2.5520459526256287),
("daymonth", -3.468336684499784),
("monthyear", -3.180654612048003),
("Mondayon <date>", -5.260096153727839),
("intersecthh:mm", -5.260096153727839),
("Wednesdaynext <cycle>", -5.260096153727839),
("Marchyear", -4.854631045619675),
("intersect by \"of\", \"from\", \"'s\"year",
-4.854631045619675),
("Mondayintersect", -5.260096153727839),
("last <day-of-week> of <time>year", -4.854631045619675),
("todayat <time-of-day>", -4.343805421853684),
("Thursday<time> timezone", -3.8738017926079484),
("dayday", -3.180654612048003),
("dd/mmat <time-of-day>", -4.007333185232471),
("the <day-of-month> (ordinal)February", -4.854631045619675),
("intersect by \",\"hh:mm", -4.343805421853684),
("Mondaythe <day-of-month> (non ordinal)", -5.260096153727839),
("Thursdaybetween <time-of-day> and <time-of-day> (interval)",
-4.854631045619675),
("dayyear", -3.5553480614894135),
("Thursdaybetween <datetime> and <datetime> (interval)",
-4.854631045619675),
("Thursdayat <time-of-day>", -4.161483865059729),
("tomorrow<time-of-day> sharp", -4.566948973167894),
("<day-of-month>(ordinal) <named-month>year",
-4.854631045619675),
("absorption of , after named day<named-month> <day-of-month> (non ordinal)",
-4.343805421853684),
("Thursdayfrom <datetime> - <datetime> (interval)",
-4.566948973167894),
("Thursdayfrom <time-of-day> - <time-of-day> (interval)",
-4.566948973167894),
("tomorrowuntil <time-of-day>", -4.566948973167894),
("absorption of , after named day<day-of-month> (non ordinal) <named-month>",
-4.343805421853684),
("the <day-of-month> (ordinal)March", -4.343805421853684),
("after <time-of-day>at <time-of-day>", -4.566948973167894),
("intersect by \",\"<day-of-month> (non ordinal) <named-month>",
-4.343805421853684),
("Mondaythe <day-of-month> (ordinal)", -5.260096153727839),
("tomorrowafter <time-of-day>", -4.566948973167894),
("from <time-of-day> - <time-of-day> (interval)on <date>",
-4.343805421853684),
("Sunday<day-of-month> (non ordinal) <named-month>",
-5.260096153727839),
("dayminute", -2.4268828096716226),
("Tuesdaythis <time>", -4.566948973167894),
("from <datetime> - <datetime> (interval)on <date>",
-4.566948973167894),
("<ordinal> <cycle> of <time>year", -4.854631045619675),
("minuteday", -2.519256129802638),
("absorption of , after named dayintersect",
-5.260096153727839),
("Octoberyear", -3.6506582412937383),
("yearhh:mm", -5.260096153727839),
("absorption of , after named dayintersect by \",\"",
-4.854631045619675),
("Septemberyear", -4.343805421853684),
("at <time-of-day>on <date>", -5.260096153727839),
("between <time-of-day> and <time-of-day> (interval)on <date>",
-5.260096153727839),
("between <datetime> and <datetime> (interval)on <date>",
-5.260096153727839),
("dayweek", -4.161483865059729),
("weekyear", -4.343805421853684),
("hh:mmtomorrow", -4.343805421853684),
("this <cycle>at <time-of-day>", -4.854631045619675),
("Wednesdaythis <time>", -4.343805421853684),
("tomorrowat <time-of-day>", -3.6506582412937383),
("at <time-of-day>tomorrow", -4.566948973167894),
("intersectFebruary", -5.260096153727839),
("last <cycle> of <time>year", -4.343805421853684),
("Mondaythis <time>", -5.260096153727839),
("<day-of-month> (non ordinal) <named-month>year",
-4.854631045619675),
("yearminute", -5.260096153727839)],
n = 154},
koData =
ClassData{prior = -1.1831696766961728, unseen = -5.365976015021851,
likelihoods =
HashMap.fromList
[("dayhour", -2.7963428082478883),
("daymonth", -2.2702497123511094),
("monthyear", -3.751854253275325),
("Marchyear", -3.9749978045895347),
("intersect by \"of\", \"from\", \"'s\"year",
-3.4153820166541116),
("Sundaythis <time>", -3.751854253275325),
("absorption of , after named dayJuly", -4.2626798770413155),
("dd/mmat <time-of-day>", -3.4153820166541116),
("hourhour", -3.0587070727153796),
("dayyear", -3.2818506240295893),
("Thursdayat <time-of-day>", -2.9633968929110543),
("Thursdayhh:mm", -4.2626798770413155),
("WednesdayFebruary", -4.66814498514948),
("Thursdayfrom <datetime> - <datetime> (interval)",
-4.66814498514948),
("in|during the <part-of-day>until <time-of-day>",
-3.9749978045895347),
("Aprilyear", -4.66814498514948),
("until <time-of-day>on <date>", -4.66814498514948),
("dayminute", -3.2818506240295893),
("Tuesdaythis <time>", -3.751854253275325),
("minuteday", -3.164067588373206),
("hh:mmon <date>", -3.2818506240295893),
("Wednesdaythis <time>", -3.4153820166541116),
("absorption of , after named dayFebruary",
-4.2626798770413155),
("this <part-of-day>until <time-of-day>", -3.9749978045895347),
("tomorrownoon", -4.66814498514948),
("this <time>until <time-of-day>", -3.9749978045895347),
("Mondaythis <time>", -4.2626798770413155)],
n = 68}}),
("<ordinal> <cycle> of <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("daymonth", -1.7346010553881064),
("ordinals (first..19st)week (grain)intersect",
-1.7346010553881064),
("ordinals (first..19st)week (grain)October",
-1.7346010553881064),
("weekmonth", -1.2237754316221157),
("ordinals (first..19st)day (grain)October",
-1.7346010553881064)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("season",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [("", 0.0)], n = 4},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("year (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.772588722239781,
likelihoods = HashMap.fromList [("", 0.0)], n = 14},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("from <datetime> - <datetime> (interval)",
Classifier{okData =
ClassData{prior = -0.8472978603872037,
unseen = -3.1780538303479458,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -2.4423470353692043),
("minuteminute", -1.5260563034950494),
("time-of-day (latent)time-of-day (latent)",
-2.4423470353692043),
("hh:mmhh:mm", -1.5260563034950494),
("hourhour", -2.4423470353692043),
("minutehour", -2.4423470353692043)],
n = 6},
koData =
ClassData{prior = -0.5596157879354228, unseen = -3.332204510175204,
likelihoods =
HashMap.fromList
[("yearhour", -2.6026896854443837),
("hh:mmtime-of-day (latent)", -1.9095425048844386),
("minuteminute", -1.9095425048844386),
("yearyear", -2.6026896854443837),
("year (latent)year (latent)", -2.6026896854443837),
("minutehour", -1.9095425048844386),
("hh:mmintersect", -1.9095425048844386),
("year (latent)time-of-day (latent)", -2.6026896854443837)],
n = 8}}),
("Saturday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("next <cycle>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.044522437723423,
likelihoods =
HashMap.fromList
[("week", -1.6094379124341003),
("month (grain)", -2.3025850929940455),
("year (grain)", -2.3025850929940455),
("week (grain)", -1.6094379124341003),
("quarter", -2.3025850929940455), ("year", -2.3025850929940455),
("month", -2.3025850929940455),
("quarter (grain)", -2.3025850929940455)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -2.1972245773362196,
likelihoods = HashMap.fromList [], n = 0}}),
("number.number hours",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("from <time-of-day> - <time-of-day> (interval)",
Classifier{okData =
ClassData{prior = -0.45198512374305727,
unseen = -3.044522437723423,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -2.3025850929940455),
("minuteminute", -1.2039728043259361),
("time-of-day (latent)time-of-day (latent)",
-2.3025850929940455),
("hh:mmhh:mm", -1.2039728043259361),
("hourhour", -2.3025850929940455),
("minutehour", -2.3025850929940455)],
n = 7},
koData =
ClassData{prior = -1.0116009116784799, unseen = -2.70805020110221,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.0296194171811581),
("minutehour", -1.0296194171811581)],
n = 4}}),
("yyyy-mm-dd",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("year (latent)",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("integer (numeric)", -0.5260930958967791),
("integer (0..19)", -0.8938178760220964)],
n = 20}}),
("Monday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.4849066497880004,
likelihoods = HashMap.fromList [("", 0.0)], n = 10},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("dd/mm/yyyy",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.1972245773362196,
likelihoods = HashMap.fromList [("", 0.0)], n = 7},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("yesterday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<ordinal> quarter <year>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)quarter (grain)year",
-1.252762968495368),
("quarteryear", -0.8472978603872037),
("ordinal (digits)quarter (grain)year", -1.252762968495368)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("after lunch",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("hh:mm:ss",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("the <cycle> after <time>",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("year (grain)christmas eve", -0.6931471805599453),
("yearday", -0.6931471805599453)],
n = 2}}),
("quarter to|till|before <integer> (hour-of-day)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.6931471805599453),
("hour", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("a pair",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("nth <time> of <time>",
Classifier{okData =
ClassData{prior = -0.5596157879354228,
unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("daymonth", -0.8938178760220964),
("ordinals (first..19st)Tuesdayintersect", -1.9924301646902063),
("ordinals (first..19st)TuesdayOctober", -1.9924301646902063),
("ordinals (first..19st)Wednesdayintersect",
-1.4816045409242156)],
n = 8},
koData =
ClassData{prior = -0.8472978603872037,
unseen = -2.9444389791664407,
likelihoods =
HashMap.fromList
[("daymonth", -0.9444616088408514),
("ordinals (first..19st)WednesdayOctober", -1.2809338454620642),
("ordinals (first..19st)TuesdaySeptember", -1.791759469228055)],
n = 6}}),
("the <day-of-month> (non ordinal)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("April",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("week (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.332204510175204,
likelihoods = HashMap.fromList [("", 0.0)], n = 26},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("now",
Classifier{okData =
ClassData{prior = -1.0986122886681098,
unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -0.40546510810816444,
unseen = -1.791759469228055,
likelihoods = HashMap.fromList [("", 0.0)], n = 4}}),
("this <part-of-day>",
Classifier{okData =
ClassData{prior = -2.1972245773362196,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("hour", -0.8472978603872037), ("evening", -1.252762968495368),
("morning", -1.252762968495368)],
n = 2},
koData =
ClassData{prior = -0.11778303565638351, unseen = -3.58351893845611,
likelihoods =
HashMap.fromList
[("hour", -0.7221347174331976),
("morning", -0.7221347174331976)],
n = 16}}),
("christmas eve",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<day-of-month>(ordinal) <named-month>",
Classifier{okData =
ClassData{prior = -9.53101798043249e-2,
unseen = -3.258096538021482,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)March", -1.8325814637483102),
("ordinal (digits)February", -1.8325814637483102),
("month", -0.8209805520698302),
("ordinal (digits)March", -1.6094379124341003)],
n = 10},
koData =
ClassData{prior = -2.3978952727983707,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)April", -1.252762968495368),
("month", -1.252762968495368)],
n = 1}}),
("numbers prefix with -, negative or minus",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -3.4339872044851463,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 29}}),
("Friday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("in|during the <part-of-day>",
Classifier{okData =
ClassData{prior = -2.1972245773362196,
unseen = -2.1972245773362196,
likelihoods =
HashMap.fromList
[("afternoon", -1.3862943611198906),
("hour", -0.9808292530117262),
("evening", -1.3862943611198906)],
n = 2},
koData =
ClassData{prior = -0.11778303565638351,
unseen = -3.6109179126442243,
likelihoods =
HashMap.fromList
[("hour", -0.750305594399894), ("morning", -0.750305594399894)],
n = 16}}),
("new year's eve",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("tomorrow",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.044522437723423,
likelihoods = HashMap.fromList [("", 0.0)], n = 19},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<cycle> after <time>",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("year (grain)christmas eve", -0.6931471805599453),
("yearday", -0.6931471805599453)],
n = 2}}),
("<time> after next",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("Wednesday", -1.8718021769015913),
("Friday", -1.466337068793427), ("day", -1.1786549963416462),
("March", -1.8718021769015913), ("month", -1.8718021769015913)],
n = 4},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("half an hour",
Classifier{okData =
ClassData{prior = -0.5108256237659907,
unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -0.916290731874155, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2}}),
("the <day-of-month> (ordinal)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)", -1.2039728043259361),
("ordinal (digits)", -0.35667494393873245)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("fractional number",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8}}),
("Sunday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("afternoon",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<duration> from now",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("second", -1.4469189829363254), ("year", -2.1400661634962708),
("<integer> <unit-of-duration>", -1.041453874828161),
("a <unit-of-duration>", -2.1400661634962708),
("minute", -1.7346010553881064)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("February",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.4849066497880004,
likelihoods = HashMap.fromList [("", 0.0)], n = 10},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("this <cycle>",
Classifier{okData =
ClassData{prior = -9.53101798043249e-2,
unseen = -3.367295829986474,
likelihoods =
HashMap.fromList
[("week", -1.7227665977411035),
("year (grain)", -2.2335922215070942),
("week (grain)", -1.7227665977411035),
("day", -1.9459101490553135), ("quarter", -2.639057329615259),
("year", -2.2335922215070942),
("quarter (grain)", -2.639057329615259),
("day (grain)", -1.9459101490553135)],
n = 10},
koData =
ClassData{prior = -2.3978952727983707,
unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("day", -1.6094379124341003),
("day (grain)", -1.6094379124341003)],
n = 1}}),
("minute (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.833213344056216,
likelihoods = HashMap.fromList [("", 0.0)], n = 15},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("ordinals (first..19st)",
Classifier{okData =
ClassData{prior = -5.406722127027582e-2,
unseen = -2.995732273553991,
likelihoods = HashMap.fromList [("", 0.0)], n = 18},
koData =
ClassData{prior = -2.9444389791664407,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("about <time-of-day>",
Classifier{okData =
ClassData{prior = -0.6931471805599453,
unseen = -2.5649493574615367,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.8754687373538999),
("hour", -0.8754687373538999)],
n = 4},
koData =
ClassData{prior = -0.6931471805599453,
unseen = -2.5649493574615367,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -1.3862943611198906),
("hour", -1.3862943611198906),
("<hour-of-day> half (as relative minutes)",
-1.3862943611198906),
("minute", -1.3862943611198906)],
n = 4}}),
("time-of-day (latent)",
Classifier{okData =
ClassData{prior = -0.7105389232718146, unseen = -4.0943445622221,
likelihoods =
HashMap.fromList [("integer (numeric)", -1.7094433359300068e-2)],
n = 57},
koData =
ClassData{prior = -0.6760527472006452, unseen = -4.127134385045092,
likelihoods =
HashMap.fromList
[("integer (numeric)", -0.2607262624632527),
("integer (0..19)", -1.4718165345580525)],
n = 59}}),
("year",
Classifier{okData =
ClassData{prior = -0.2231435513142097, unseen = -3.258096538021482,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 24},
koData =
ClassData{prior = -1.6094379124341003,
unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 6}}),
("last <day-of-week> of <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("daymonth", -0.8266785731844679),
("SundayMarch", -1.6739764335716716),
("MondayMarch", -1.6739764335716716),
("Sundayintersect", -1.6739764335716716)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [], n = 0}}),
("<integer> <unit-of-duration>",
Classifier{okData =
ClassData{prior = -0.5408064559779265, unseen = -4.74493212836325,
likelihoods =
HashMap.fromList
[("week", -2.538973871058276),
("integer (0..19)year (grain)", -3.349904087274605),
("integer (numeric)day (grain)", -2.9444389791664407),
("integer (0..19)hour (grain)", -4.04305126783455),
("second", -2.9444389791664407),
("integer (numeric)second (grain)", -4.04305126783455),
("a pairhour (grain)", -4.04305126783455),
("integer (numeric)year (grain)", -3.6375861597263857),
("day", -2.338303175596125), ("year", -2.9444389791664407),
("integer (numeric)week (grain)", -3.349904087274605),
("integer (0..19)month (grain)", -4.04305126783455),
("integer (0..19)second (grain)", -3.126760535960395),
("hour", -2.9444389791664407), ("month", -3.6375861597263857),
("integer (numeric)minute (grain)", -2.790288299339182),
("integer (0..19)minute (grain)", -2.9444389791664407),
("integer (numeric)month (grain)", -4.04305126783455),
("minute", -2.2512917986064953),
("integer (numeric)hour (grain)", -3.349904087274605),
("integer (0..19)day (grain)", -2.9444389791664407),
("integer (0..19)week (grain)", -2.9444389791664407)],
n = 46},
koData =
ClassData{prior = -0.8729402910005413, unseen = -4.48863636973214,
likelihoods =
HashMap.fromList
[("week", -2.6855773452501515),
("integer (0..19)year (grain)", -3.378724525810097),
("integer (numeric)day (grain)", -3.0910424533583156),
("integer (0..19)hour (grain)", -3.784189633918261),
("second", -2.867898902044106),
("integer (numeric)second (grain)", -3.378724525810097),
("integer (numeric)year (grain)", -3.0910424533583156),
("day", -2.6855773452501515), ("year", -2.6855773452501515),
("integer (numeric)week (grain)", -3.378724525810097),
("integer (0..19)month (grain)", -3.0910424533583156),
("integer (0..19)second (grain)", -3.378724525810097),
("hour", -2.6855773452501515), ("month", -2.6855773452501515),
("integer (numeric)minute (grain)", -3.378724525810097),
("integer (0..19)minute (grain)", -3.378724525810097),
("integer (numeric)month (grain)", -3.378724525810097),
("minute", -2.867898902044106),
("integer (numeric)hour (grain)", -2.867898902044106),
("integer (0..19)day (grain)", -3.378724525810097),
("integer (0..19)week (grain)", -3.0910424533583156)],
n = 33}}),
("<duration> after <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods =
HashMap.fromList
[("a <unit-of-duration>christmas eve", -1.5040773967762742),
("yearday", -0.8109302162163288),
("<integer> <unit-of-duration>christmas eve",
-1.0986122886681098)],
n = 3},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("relative minutes after|past <integer> (hour-of-day)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("hour", -0.6931471805599453),
("integer (numeric)time-of-day (latent)", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("a <unit-of-duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.2188758248682006,
likelihoods =
HashMap.fromList
[("week", -2.0794415416798357),
("year (grain)", -2.4849066497880004),
("second", -2.0794415416798357),
("week (grain)", -2.0794415416798357),
("day", -2.4849066497880004),
("minute (grain)", -2.4849066497880004),
("year", -2.4849066497880004),
("second (grain)", -2.0794415416798357),
("minute", -2.4849066497880004),
("day (grain)", -2.4849066497880004)],
n = 7},
koData =
ClassData{prior = -infinity, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [], n = 0}}),
("intersect by \",\"",
Classifier{okData =
ClassData{prior = -9.763846956391606e-2,
unseen = -4.709530201312334,
likelihoods =
HashMap.fromList
[("Wednesday<named-month> <day-of-month> (non ordinal)",
-4.007333185232471),
("<named-month> <day-of-month> (non ordinal)Friday",
-4.007333185232471),
("Friday<named-month> <day-of-month> (non ordinal)",
-3.6018680771243066),
("at <time-of-day>Saturday", -3.6018680771243066),
("intersect by \",\"year", -4.007333185232471),
("hh:mmintersect by \",\"", -3.6018680771243066),
("dayday", -2.3025850929940455),
("dayyear", -3.0910424533583156),
("Saturday<day-of-month> (non ordinal) <named-month>",
-3.6018680771243066),
("<named-month> <day-of-month> (non ordinal)intersect",
-4.007333185232471),
("intersect by \",\"<day-of-month> (non ordinal) <named-month>",
-3.0910424533583156),
("Fridayintersect", -4.007333185232471),
("hh:mmintersect", -3.6018680771243066),
("intersect by \",\"intersect", -4.007333185232471),
("hh:mmSaturday", -3.6018680771243066),
("at <time-of-day>intersect", -3.6018680771243066),
("Sunday<day-of-month> (non ordinal) <named-month>",
-4.007333185232471),
("dayminute", -2.908720896564361),
("intersectyear", -4.007333185232471),
("minuteday", -1.6559579280689931),
("hh:mmabsorption of , after named day", -3.6018680771243066),
("at <time-of-day>intersect by \",\"", -3.6018680771243066),
("at <time-of-day>absorption of , after named day",
-3.6018680771243066),
("intersectintersect", -4.007333185232471),
("Fridayintersect by \",\"", -3.6018680771243066),
("Monday<named-month> <day-of-month> (non ordinal)",
-4.007333185232471),
("Monday<day-of-month> (non ordinal) <named-month>",
-4.007333185232471),
("<named-month> <day-of-month> (non ordinal)year",
-3.6018680771243066)],
n = 39},
koData =
ClassData{prior = -2.374905754573672, unseen = -3.713572066704308,
likelihoods =
HashMap.fromList
[("daymonth", -2.0794415416798357),
("FridayJuly", -2.5902671654458267),
("WednesdayFebruary", -2.995732273553991),
("MondayFebruary", -2.995732273553991)],
n = 4}}),
("hh:mm",
Classifier{okData =
ClassData{prior = -1.526747213078842e-2,
unseen = -4.204692619390966,
likelihoods = HashMap.fromList [("", 0.0)], n = 65},
koData =
ClassData{prior = -4.189654742026425, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("quarter after|past <integer> (hour-of-day)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.6931471805599453),
("hour", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("second (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<time-of-day> sharp",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.772588722239781,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -1.3217558399823195),
("time-of-day (latent)", -1.3217558399823195),
("hour", -0.7621400520468967)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("intersect by \"of\", \"from\", \"'s\"",
Classifier{okData =
ClassData{prior = -0.8209805520698302,
unseen = -3.6375861597263857,
likelihoods =
HashMap.fromList
[("Sundaylast <cycle>", -2.917770732084279),
("daymonth", -1.6650077635889111),
("Wednesdayintersect", -2.512305623976115),
("Wednesdaynext <cycle>", -2.917770732084279),
("Tuesdaythis <cycle>", -2.917770732084279),
("WednesdayOctober", -2.512305623976115),
("Wednesdaythis <cycle>", -2.917770732084279),
("TuesdayOctober", -2.512305623976115),
("Mondaythis <cycle>", -2.917770732084279),
("dayweek", -1.8191584434161694)],
n = 11},
koData =
ClassData{prior = -0.579818495252942, unseen = -3.784189633918261,
likelihoods =
HashMap.fromList
[("daymonth", -1.0531499145913523),
("TuesdaySeptember", -2.662587827025453),
("Wednesdayintersect", -2.662587827025453),
("WednesdayOctober", -2.662587827025453),
("SundayMarch", -2.662587827025453),
("MondayMarch", -2.662587827025453),
("Tuesdayintersect", -2.662587827025453),
("Sundayintersect", -2.662587827025453)],
n = 14}}),
("<duration> ago",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.5553480614894135,
likelihoods =
HashMap.fromList
[("week", -1.580450375560848), ("day", -1.916922612182061),
("year", -2.4277482359480516),
("<integer> <unit-of-duration>", -0.8873031950009028),
("a <unit-of-duration>", -2.833213344056216),
("month", -2.4277482359480516)],
n = 14},
koData =
ClassData{prior = -infinity, unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [], n = 0}}),
("last <time>",
Classifier{okData =
ClassData{prior = -2.751535313041949, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("day", -1.6739764335716716), ("Sunday", -2.0794415416798357),
("hour", -2.0794415416798357), ("Tuesday", -2.0794415416798357),
("week-end", -2.0794415416798357)],
n = 3},
koData =
ClassData{prior = -6.59579677917974e-2, unseen = -4.59511985013459,
likelihoods =
HashMap.fromList
[("year (latent)", -1.6405284995041316),
("Monday", -3.4863551900024623), ("day", -2.3877429013343523),
("Sunday", -3.4863551900024623),
("time-of-day (latent)", -1.6405284995041316),
("year", -1.6405284995041316),
("intersect by \"of\", \"from\", \"'s\"", -2.9755295662364714),
("hour", -1.6405284995041316)],
n = 44}}),
("March",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.833213344056216,
likelihoods = HashMap.fromList [("", 0.0)], n = 15},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("the day after tomorrow",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("noon",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("until <time-of-day>",
Classifier{okData =
ClassData{prior = -0.1823215567939546, unseen = -3.258096538021482,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -0.8209805520698302),
("hour", -0.8209805520698302)],
n = 10},
koData =
ClassData{prior = -1.791759469228055, unseen = -2.3025850929940455,
likelihoods =
HashMap.fromList
[("intersect", -1.5040773967762742),
("hh:mm", -1.5040773967762742),
("minute", -1.0986122886681098)],
n = 2}}),
("<integer> and an half hours",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods =
HashMap.fromList
[("integer (numeric)", -0.6931471805599453),
("integer (0..19)", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("after <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("day", -0.6931471805599453),
("<integer> <unit-of-duration>", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("evening",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("decimal number",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("next <time>",
Classifier{okData =
ClassData{prior = -0.587786664902119, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("Wednesday", -2.1400661634962708),
("Monday", -2.1400661634962708), ("day", -1.2237754316221157),
("March", -2.1400661634962708), ("month", -2.1400661634962708),
("Tuesday", -1.7346010553881064)],
n = 5},
koData =
ClassData{prior = -0.8109302162163288, unseen = -2.772588722239781,
likelihoods =
HashMap.fromList
[("Wednesday", -2.0149030205422647),
("Friday", -1.6094379124341003), ("day", -1.3217558399823195),
("March", -2.0149030205422647), ("month", -2.0149030205422647)],
n = 4}}),
("last <cycle>",
Classifier{okData =
ClassData{prior = -0.6931471805599453, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("week", -1.6739764335716716),
("month (grain)", -2.0794415416798357),
("year (grain)", -2.0794415416798357),
("week (grain)", -1.6739764335716716),
("year", -2.0794415416798357), ("month", -2.0794415416798357)],
n = 4},
koData =
ClassData{prior = -0.6931471805599453, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("week", -1.6739764335716716),
("week (grain)", -1.6739764335716716),
("day", -1.6739764335716716),
("day (grain)", -1.6739764335716716)],
n = 4}}),
("christmas",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("new year's day",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("next n <cycle>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.912023005428146,
likelihoods =
HashMap.fromList
[("week", -2.793208009442517),
("integer (0..19)year (grain)", -3.1986731175506815),
("integer (numeric)day (grain)", -3.1986731175506815),
("integer (0..19)hour (grain)", -3.1986731175506815),
("second", -2.793208009442517),
("integer (numeric)second (grain)", -3.1986731175506815),
("integer (numeric)year (grain)", -3.1986731175506815),
("day", -2.793208009442517), ("year", -2.793208009442517),
("integer (numeric)week (grain)", -3.1986731175506815),
("integer (0..19)month (grain)", -3.1986731175506815),
("integer (0..19)second (grain)", -3.1986731175506815),
("hour", -2.793208009442517), ("month", -2.793208009442517),
("integer (numeric)minute (grain)", -3.1986731175506815),
("integer (0..19)minute (grain)", -3.1986731175506815),
("integer (numeric)month (grain)", -3.1986731175506815),
("minute", -2.793208009442517),
("integer (numeric)hour (grain)", -3.1986731175506815),
("integer (0..19)day (grain)", -3.1986731175506815),
("integer (0..19)week (grain)", -3.1986731175506815)],
n = 14},
koData =
ClassData{prior = -infinity, unseen = -3.0910424533583156,
likelihoods = HashMap.fromList [], n = 0}}),
("in <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -4.418840607796598,
likelihoods =
HashMap.fromList
[("week", -3.0204248861443626),
("<integer> more <unit-of-duration>", -3.3081069585961433),
("number.number hours", -3.713572066704308),
("second", -2.797281334830153), ("day", -2.6149597780361984),
("half an hour", -3.713572066704308),
("<integer> <unit-of-duration>", -1.2286654169163076),
("a <unit-of-duration>", -2.797281334830153),
("<integer> and an half hours", -3.3081069585961433),
("hour", -2.6149597780361984), ("minute", -1.4622802680978126),
("about <duration>", -3.3081069585961433)],
n = 35},
koData =
ClassData{prior = -infinity, unseen = -2.5649493574615367,
likelihoods = HashMap.fromList [], n = 0}}),
("<datetime> - <datetime> (interval)",
Classifier{okData =
ClassData{prior = -0.5436154465889815,
unseen = -3.8501476017100584,
likelihoods =
HashMap.fromList
[("minuteminute", -1.1895840668738362),
("hh:mmhh:mm", -1.1895840668738362),
("dayday", -2.03688192726104),
("<day-of-month> (non ordinal) <named-month><day-of-month> (non ordinal) <named-month>",
-2.03688192726104)],
n = 18},
koData =
ClassData{prior = -0.8690378470236094,
unseen = -3.6109179126442243,
likelihoods =
HashMap.fromList
[("July<day-of-month> (non ordinal) <named-month>",
-2.890371757896165),
("monthday", -1.791759469228055),
("minuteminute", -1.6376087894007967),
("hh:mmhh:mm", -2.890371757896165),
("dayyear", -2.4849066497880004),
("hh:mmintersect", -1.791759469228055),
("August<day-of-month> (non ordinal) <named-month>",
-1.9740810260220096),
("dd/mmyear", -2.4849066497880004)],
n = 13}}),
("Tuesday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<time-of-day> - <time-of-day> (interval)",
Classifier{okData =
ClassData{prior = -0.6241543090729939,
unseen = -3.5553480614894135,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -2.4277482359480516),
("minuteminute", -0.8873031950009028),
("hh:mmhh:mm", -0.8873031950009028),
("minutehour", -2.4277482359480516)],
n = 15},
koData =
ClassData{prior = -0.7672551527136672,
unseen = -3.4339872044851463,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -0.8362480242006186),
("minuteminute", -2.70805020110221),
("hh:mmhh:mm", -2.70805020110221),
("minutehour", -0.8362480242006186)],
n = 13}}),
("last n <cycle>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -4.04305126783455,
likelihoods =
HashMap.fromList
[("week", -2.639057329615259),
("integer (0..19)year (grain)", -3.332204510175204),
("integer (numeric)day (grain)", -2.9267394020670396),
("second", -2.9267394020670396),
("integer (numeric)second (grain)", -3.332204510175204),
("integer (numeric)year (grain)", -2.9267394020670396),
("day", -2.639057329615259), ("year", -2.639057329615259),
("integer (numeric)week (grain)", -3.332204510175204),
("integer (0..19)month (grain)", -2.9267394020670396),
("integer (0..19)second (grain)", -3.332204510175204),
("hour", -2.9267394020670396), ("month", -2.639057329615259),
("integer (numeric)minute (grain)", -3.332204510175204),
("integer (0..19)minute (grain)", -3.332204510175204),
("integer (numeric)month (grain)", -3.332204510175204),
("minute", -2.9267394020670396),
("integer (numeric)hour (grain)", -2.9267394020670396),
("integer (0..19)day (grain)", -3.332204510175204),
("integer (0..19)week (grain)", -2.9267394020670396)],
n = 18},
koData =
ClassData{prior = -infinity, unseen = -3.044522437723423,
likelihoods = HashMap.fromList [], n = 0}}),
("<named-month> <day-of-month> (non ordinal)",
Classifier{okData =
ClassData{prior = -0.11778303565638351,
unseen = -3.044522437723423,
likelihoods =
HashMap.fromList
[("Aprilinteger (numeric)", -2.3025850929940455),
("month", -0.7985076962177716),
("Februaryinteger (numeric)", -1.3862943611198906),
("Julyinteger (numeric)", -1.6094379124341003)],
n = 8},
koData =
ClassData{prior = -2.1972245773362196,
unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("Aprilinteger (numeric)", -1.0986122886681098),
("month", -1.0986122886681098)],
n = 1}}),
("<day-of-month> (non ordinal) <named-month>",
Classifier{okData =
ClassData{prior = -0.1466034741918754, unseen = -3.828641396489095,
likelihoods =
HashMap.fromList
[("integer (numeric)September", -2.70805020110221),
("integer (numeric)April", -3.1135153092103742),
("integer (numeric)August", -1.5040773967762742),
("integer (numeric)February", -2.4203681286504293),
("month", -0.8109302162163288),
("integer (numeric)July", -2.70805020110221),
("integer (numeric)March", -2.70805020110221)],
n = 19},
koData =
ClassData{prior = -1.9924301646902063, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("month", -1.1786549963416462),
("integer (numeric)July", -1.1786549963416462)],
n = 3}}),
("this|next <day-of-week>",
Classifier{okData =
ClassData{prior = -0.5596157879354228, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("Wednesday", -1.8718021769015913),
("Monday", -1.8718021769015913), ("day", -0.9555114450274363),
("Tuesday", -1.466337068793427)],
n = 4},
koData =
ClassData{prior = -0.8472978603872037,
unseen = -2.4849066497880004,
likelihoods =
HashMap.fromList
[("Wednesday", -1.7047480922384253),
("Friday", -1.2992829841302609), ("day", -1.0116009116784799)],
n = 3}}),
("<hour-of-day> half (as relative minutes)",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("about <time-of-day>", -1.7346010553881064),
("time-of-day (latent)", -1.041453874828161),
("hour", -0.7537718023763802)],
n = 7}}),
("ordinal (digits)",
Classifier{okData =
ClassData{prior = -8.701137698962981e-2,
unseen = -2.5649493574615367,
likelihoods = HashMap.fromList [("", 0.0)], n = 11},
koData =
ClassData{prior = -2.4849066497880004,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("quarter (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("last <cycle> of <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("day (grain)October", -1.9924301646902063),
("daymonth", -1.4816045409242156),
("day (grain)intersect", -1.9924301646902063),
("weekmonth", -1.4816045409242156),
("week (grain)intersect", -1.9924301646902063),
("week (grain)September", -1.9924301646902063)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [], n = 0}}),
("<day-of-month>(ordinal) <named-month> year",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)April", -1.6094379124341003),
("ordinals (first..19st)March", -1.6094379124341003),
("month", -0.916290731874155),
("ordinal (digits)March", -1.6094379124341003)],
n = 3},
koData =
ClassData{prior = -infinity, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [], n = 0}}),
("morning",
Classifier{okData =
ClassData{prior = -2.1972245773362196,
unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -0.11778303565638351,
unseen = -2.890371757896165,
likelihoods = HashMap.fromList [("", 0.0)], n = 16}}),
("week-end",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("after <time-of-day>",
Classifier{okData =
ClassData{prior = -0.6931471805599453, unseen = -3.58351893845611,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -1.6094379124341003),
("intersect", -2.169053700369523),
("tomorrow", -2.169053700369523), ("day", -2.169053700369523),
("hour", -1.252762968495368)],
n = 12},
koData =
ClassData{prior = -0.6931471805599453, unseen = -3.58351893845611,
likelihoods =
HashMap.fromList
[("this <part-of-day>", -2.169053700369523),
("christmas eve", -2.456735772821304),
("in|during the <part-of-day>", -2.169053700369523),
("day", -2.456735772821304), ("hh:mm", -2.8622008809294686),
("hour", -1.252762968495368), ("minute", -2.8622008809294686),
("this <time>", -2.169053700369523)],
n = 12}}),
("day (grain)",
Classifier{okData =
ClassData{prior = -4.445176257083381e-2,
unseen = -3.1780538303479458,
likelihoods = HashMap.fromList [("", 0.0)], n = 22},
koData =
ClassData{prior = -3.1354942159291497,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("<month> dd-dd (interval)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.1972245773362196,
likelihoods =
HashMap.fromList
[("July", -0.6931471805599453), ("month", -0.6931471805599453)],
n = 3},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("about <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("half an hour", -0.6931471805599453),
("minute", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("this <time>",
Classifier{okData =
ClassData{prior = -1.3862943611198906, unseen = -3.871201010907891,
likelihoods =
HashMap.fromList
[("week", -2.2407096892759584), ("October", -2.751535313041949),
("intersect", -2.751535313041949),
("season", -2.2407096892759584),
("next <cycle>", -3.1570004211501135),
("day", -2.2407096892759584),
("this <cycle>", -2.463853240590168),
("hour", -2.2407096892759584), ("evening", -3.1570004211501135),
("month", -2.2407096892759584),
("morning", -3.1570004211501135),
("week-end", -2.751535313041949)],
n = 16},
koData =
ClassData{prior = -0.2876820724517809, unseen = -4.718498871295094,
likelihoods =
HashMap.fromList
[("September", -3.100092288878234),
("October", -2.3116349285139637),
("intersect", -2.1445808438507976),
("time-of-day (latent)", -3.6109179126442243),
("March", -3.100092288878234), ("hour", -1.7650912221458936),
("month", -1.2755429968271879),
("morning", -1.8763168572561182)],
n = 48}}),
("within <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods =
HashMap.fromList
[("week", -0.6931471805599453),
("<integer> <unit-of-duration>", -0.6931471805599453)],
n = 1},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("August",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}})] | null | https://raw.githubusercontent.com/facebook/duckling/03c6197283943c595608bb977a88a07c9e997006/Duckling/Ranking/Classifiers/DA_XX.hs | haskell | All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
---------------------------------------------------------------
Auto-generated by regenClassifiers
DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
---------------------------------------------------------------
# LANGUAGE OverloadedStrings # | Copyright ( c ) 2016 - present , Facebook , Inc.
of patent rights can be found in the PATENTS file in the same directory .
@generated
module Duckling.Ranking.Classifiers.DA_XX (classifiers) where
import Data.String
import Prelude
import qualified Data.HashMap.Strict as HashMap
import Duckling.Ranking.Types
classifiers :: Classifiers
classifiers
= HashMap.fromList
[("<time> timezone",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.6109179126442243,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -1.2809338454620642),
("hh:mm", -1.5040773967762742), ("hour", -1.9740810260220096),
("minute", -1.0185695809945732)],
n = 16},
koData =
ClassData{prior = -infinity, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [], n = 0}}),
("Thursday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.0910424533583156,
likelihoods = HashMap.fromList [("", 0.0)], n = 20},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("integer (numeric)",
Classifier{okData =
ClassData{prior = -0.7872986116830517,
unseen = -5.0689042022202315,
likelihoods = HashMap.fromList [("", 0.0)], n = 157},
koData =
ClassData{prior = -0.6071024542014105, unseen = -5.247024072160486,
likelihoods = HashMap.fromList [("", 0.0)], n = 188}}),
("the day before yesterday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("lunch",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<time> <part-of-day>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("dayhour", -0.9555114450274363),
("tomorrowevening", -1.8718021769015913),
("tomorrowlunch", -1.8718021769015913),
("yesterdayevening", -1.8718021769015913),
("Mondaymorning", -1.8718021769015913)],
n = 4},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("dd/mm",
Classifier{okData =
ClassData{prior = -0.40546510810816444,
unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8},
koData =
ClassData{prior = -1.0986122886681098, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [("", 0.0)], n = 4}}),
("today",
Classifier{okData =
ClassData{prior = -0.1823215567939546,
unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [("", 0.0)], n = 5},
koData =
ClassData{prior = -1.791759469228055, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("at <time-of-day>",
Classifier{okData =
ClassData{prior = -0.2674793651342615, unseen = -5.062595033026967,
likelihoods =
HashMap.fromList
[("<time> timezone", -3.264486336120253),
("time-of-day (latent)", -1.2495833155779883),
("relative minutes after|past <integer> (hour-of-day)",
-3.9576335166801986),
("hh:mm", -1.9652033519899923),
("<time-of-day> sharp", -3.6699514442284173),
("hour", -1.1850447944404172), ("minute", -1.688949975361834)],
n = 75},
koData =
ClassData{prior = -1.4494732627414222,
unseen = -3.9889840465642745,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.7922380832041762),
("hour", -0.7922380832041762)],
n = 23}}),
("absorption of , after named day",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("Wednesday", -2.3978952727983707),
("Saturday", -1.9924301646902063),
("Monday", -1.9924301646902063),
("Friday", -1.9924301646902063), ("day", -0.8938178760220964),
("Sunday", -2.3978952727983707)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [], n = 0}}),
("September",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("tonight",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("on <date>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.784189633918261,
likelihoods =
HashMap.fromList
[("Thursday", -1.6817585740137264),
("Saturday", -3.068052935133617),
("Monday", -3.068052935133617),
("the <day-of-month> (non ordinal)", -2.662587827025453),
("<day-of-month>(ordinal) <named-month>", -1.8152899666382492),
("day", -0.8167611365271219),
("the <day-of-month> (ordinal)", -3.068052935133617)],
n = 18},
koData =
ClassData{prior = -infinity, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [], n = 0}}),
("integer (0..19)",
Classifier{okData =
ClassData{prior = -0.13976194237515874,
unseen = -3.7376696182833684,
likelihoods = HashMap.fromList [("", 0.0)], n = 40},
koData =
ClassData{prior = -2.03688192726104, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6}}),
("between <time-of-day> and <time-of-day> (interval)",
Classifier{okData =
ClassData{prior = -0.2876820724517809,
unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.6094379124341003),
("minuteminute", -1.2039728043259361),
("hh:mmhh:mm", -1.2039728043259361),
("minutehour", -1.6094379124341003)],
n = 3},
koData =
ClassData{prior = -1.3862943611198906,
unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.0986122886681098),
("minutehour", -1.0986122886681098)],
n = 1}}),
("between <datetime> and <datetime> (interval)",
Classifier{okData =
ClassData{prior = -0.5108256237659907,
unseen = -2.4849066497880004,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.7047480922384253),
("minuteminute", -1.2992829841302609),
("hh:mmhh:mm", -1.2992829841302609),
("minutehour", -1.7047480922384253)],
n = 3},
koData =
ClassData{prior = -0.916290731874155, unseen = -2.3025850929940455,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.5040773967762742),
("minuteminute", -1.5040773967762742),
("minutehour", -1.5040773967762742),
("hh:mmintersect", -1.5040773967762742)],
n = 2}}),
("October",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.70805020110221,
likelihoods = HashMap.fromList [("", 0.0)], n = 13},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("month (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<integer> more <unit-of-duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("integer (numeric)minute (grain)", -1.252762968495368),
("integer (0..19)minute (grain)", -1.252762968495368),
("minute", -0.8472978603872037)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("<time-of-day> o'clock",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.6931471805599453),
("hour", -0.6931471805599453)],
n = 1},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("Wednesday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.5649493574615367,
likelihoods = HashMap.fromList [("", 0.0)], n = 11},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("July",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("hour (grain)",
Classifier{okData =
ClassData{prior = -0.10536051565782628,
unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -2.3025850929940455,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("<ordinal> quarter",
Classifier{okData =
ClassData{prior = -0.6931471805599453,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinal (digits)quarter (grain)", -1.252762968495368),
("quarter", -0.8472978603872037),
("ordinals (first..19st)quarter (grain)", -1.252762968495368)],
n = 2},
koData =
ClassData{prior = -0.6931471805599453,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinal (digits)quarter (grain)", -1.252762968495368),
("quarter", -0.8472978603872037),
("ordinals (first..19st)quarter (grain)", -1.252762968495368)],
n = 2}}),
("intersect",
Classifier{okData =
ClassData{prior = -0.3657247794586503, unseen = -5.955837369464831,
likelihoods =
HashMap.fromList
[("<datetime> - <datetime> (interval)on <date>",
-4.161483865059729),
("Wednesday<named-month> <day-of-month> (non ordinal)",
-5.260096153727839),
("<time-of-day> - <time-of-day> (interval)on <date>",
-4.161483865059729),
("hourday", -5.260096153727839),
("dayhour", -2.5520459526256287),
("daymonth", -3.468336684499784),
("monthyear", -3.180654612048003),
("Mondayon <date>", -5.260096153727839),
("intersecthh:mm", -5.260096153727839),
("Wednesdaynext <cycle>", -5.260096153727839),
("Marchyear", -4.854631045619675),
("intersect by \"of\", \"from\", \"'s\"year",
-4.854631045619675),
("Mondayintersect", -5.260096153727839),
("last <day-of-week> of <time>year", -4.854631045619675),
("todayat <time-of-day>", -4.343805421853684),
("Thursday<time> timezone", -3.8738017926079484),
("dayday", -3.180654612048003),
("dd/mmat <time-of-day>", -4.007333185232471),
("the <day-of-month> (ordinal)February", -4.854631045619675),
("intersect by \",\"hh:mm", -4.343805421853684),
("Mondaythe <day-of-month> (non ordinal)", -5.260096153727839),
("Thursdaybetween <time-of-day> and <time-of-day> (interval)",
-4.854631045619675),
("dayyear", -3.5553480614894135),
("Thursdaybetween <datetime> and <datetime> (interval)",
-4.854631045619675),
("Thursdayat <time-of-day>", -4.161483865059729),
("tomorrow<time-of-day> sharp", -4.566948973167894),
("<day-of-month>(ordinal) <named-month>year",
-4.854631045619675),
("absorption of , after named day<named-month> <day-of-month> (non ordinal)",
-4.343805421853684),
("Thursdayfrom <datetime> - <datetime> (interval)",
-4.566948973167894),
("Thursdayfrom <time-of-day> - <time-of-day> (interval)",
-4.566948973167894),
("tomorrowuntil <time-of-day>", -4.566948973167894),
("absorption of , after named day<day-of-month> (non ordinal) <named-month>",
-4.343805421853684),
("the <day-of-month> (ordinal)March", -4.343805421853684),
("after <time-of-day>at <time-of-day>", -4.566948973167894),
("intersect by \",\"<day-of-month> (non ordinal) <named-month>",
-4.343805421853684),
("Mondaythe <day-of-month> (ordinal)", -5.260096153727839),
("tomorrowafter <time-of-day>", -4.566948973167894),
("from <time-of-day> - <time-of-day> (interval)on <date>",
-4.343805421853684),
("Sunday<day-of-month> (non ordinal) <named-month>",
-5.260096153727839),
("dayminute", -2.4268828096716226),
("Tuesdaythis <time>", -4.566948973167894),
("from <datetime> - <datetime> (interval)on <date>",
-4.566948973167894),
("<ordinal> <cycle> of <time>year", -4.854631045619675),
("minuteday", -2.519256129802638),
("absorption of , after named dayintersect",
-5.260096153727839),
("Octoberyear", -3.6506582412937383),
("yearhh:mm", -5.260096153727839),
("absorption of , after named dayintersect by \",\"",
-4.854631045619675),
("Septemberyear", -4.343805421853684),
("at <time-of-day>on <date>", -5.260096153727839),
("between <time-of-day> and <time-of-day> (interval)on <date>",
-5.260096153727839),
("between <datetime> and <datetime> (interval)on <date>",
-5.260096153727839),
("dayweek", -4.161483865059729),
("weekyear", -4.343805421853684),
("hh:mmtomorrow", -4.343805421853684),
("this <cycle>at <time-of-day>", -4.854631045619675),
("Wednesdaythis <time>", -4.343805421853684),
("tomorrowat <time-of-day>", -3.6506582412937383),
("at <time-of-day>tomorrow", -4.566948973167894),
("intersectFebruary", -5.260096153727839),
("last <cycle> of <time>year", -4.343805421853684),
("Mondaythis <time>", -5.260096153727839),
("<day-of-month> (non ordinal) <named-month>year",
-4.854631045619675),
("yearminute", -5.260096153727839)],
n = 154},
koData =
ClassData{prior = -1.1831696766961728, unseen = -5.365976015021851,
likelihoods =
HashMap.fromList
[("dayhour", -2.7963428082478883),
("daymonth", -2.2702497123511094),
("monthyear", -3.751854253275325),
("Marchyear", -3.9749978045895347),
("intersect by \"of\", \"from\", \"'s\"year",
-3.4153820166541116),
("Sundaythis <time>", -3.751854253275325),
("absorption of , after named dayJuly", -4.2626798770413155),
("dd/mmat <time-of-day>", -3.4153820166541116),
("hourhour", -3.0587070727153796),
("dayyear", -3.2818506240295893),
("Thursdayat <time-of-day>", -2.9633968929110543),
("Thursdayhh:mm", -4.2626798770413155),
("WednesdayFebruary", -4.66814498514948),
("Thursdayfrom <datetime> - <datetime> (interval)",
-4.66814498514948),
("in|during the <part-of-day>until <time-of-day>",
-3.9749978045895347),
("Aprilyear", -4.66814498514948),
("until <time-of-day>on <date>", -4.66814498514948),
("dayminute", -3.2818506240295893),
("Tuesdaythis <time>", -3.751854253275325),
("minuteday", -3.164067588373206),
("hh:mmon <date>", -3.2818506240295893),
("Wednesdaythis <time>", -3.4153820166541116),
("absorption of , after named dayFebruary",
-4.2626798770413155),
("this <part-of-day>until <time-of-day>", -3.9749978045895347),
("tomorrownoon", -4.66814498514948),
("this <time>until <time-of-day>", -3.9749978045895347),
("Mondaythis <time>", -4.2626798770413155)],
n = 68}}),
("<ordinal> <cycle> of <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("daymonth", -1.7346010553881064),
("ordinals (first..19st)week (grain)intersect",
-1.7346010553881064),
("ordinals (first..19st)week (grain)October",
-1.7346010553881064),
("weekmonth", -1.2237754316221157),
("ordinals (first..19st)day (grain)October",
-1.7346010553881064)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("season",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [("", 0.0)], n = 4},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("year (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.772588722239781,
likelihoods = HashMap.fromList [("", 0.0)], n = 14},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("from <datetime> - <datetime> (interval)",
Classifier{okData =
ClassData{prior = -0.8472978603872037,
unseen = -3.1780538303479458,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -2.4423470353692043),
("minuteminute", -1.5260563034950494),
("time-of-day (latent)time-of-day (latent)",
-2.4423470353692043),
("hh:mmhh:mm", -1.5260563034950494),
("hourhour", -2.4423470353692043),
("minutehour", -2.4423470353692043)],
n = 6},
koData =
ClassData{prior = -0.5596157879354228, unseen = -3.332204510175204,
likelihoods =
HashMap.fromList
[("yearhour", -2.6026896854443837),
("hh:mmtime-of-day (latent)", -1.9095425048844386),
("minuteminute", -1.9095425048844386),
("yearyear", -2.6026896854443837),
("year (latent)year (latent)", -2.6026896854443837),
("minutehour", -1.9095425048844386),
("hh:mmintersect", -1.9095425048844386),
("year (latent)time-of-day (latent)", -2.6026896854443837)],
n = 8}}),
("Saturday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("next <cycle>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.044522437723423,
likelihoods =
HashMap.fromList
[("week", -1.6094379124341003),
("month (grain)", -2.3025850929940455),
("year (grain)", -2.3025850929940455),
("week (grain)", -1.6094379124341003),
("quarter", -2.3025850929940455), ("year", -2.3025850929940455),
("month", -2.3025850929940455),
("quarter (grain)", -2.3025850929940455)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -2.1972245773362196,
likelihoods = HashMap.fromList [], n = 0}}),
("number.number hours",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("from <time-of-day> - <time-of-day> (interval)",
Classifier{okData =
ClassData{prior = -0.45198512374305727,
unseen = -3.044522437723423,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -2.3025850929940455),
("minuteminute", -1.2039728043259361),
("time-of-day (latent)time-of-day (latent)",
-2.3025850929940455),
("hh:mmhh:mm", -1.2039728043259361),
("hourhour", -2.3025850929940455),
("minutehour", -2.3025850929940455)],
n = 7},
koData =
ClassData{prior = -1.0116009116784799, unseen = -2.70805020110221,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -1.0296194171811581),
("minutehour", -1.0296194171811581)],
n = 4}}),
("yyyy-mm-dd",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("year (latent)",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("integer (numeric)", -0.5260930958967791),
("integer (0..19)", -0.8938178760220964)],
n = 20}}),
("Monday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.4849066497880004,
likelihoods = HashMap.fromList [("", 0.0)], n = 10},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("dd/mm/yyyy",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.1972245773362196,
likelihoods = HashMap.fromList [("", 0.0)], n = 7},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("yesterday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<ordinal> quarter <year>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)quarter (grain)year",
-1.252762968495368),
("quarteryear", -0.8472978603872037),
("ordinal (digits)quarter (grain)year", -1.252762968495368)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("after lunch",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("hh:mm:ss",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("the <cycle> after <time>",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("year (grain)christmas eve", -0.6931471805599453),
("yearday", -0.6931471805599453)],
n = 2}}),
("quarter to|till|before <integer> (hour-of-day)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.6931471805599453),
("hour", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("a pair",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("nth <time> of <time>",
Classifier{okData =
ClassData{prior = -0.5596157879354228,
unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("daymonth", -0.8938178760220964),
("ordinals (first..19st)Tuesdayintersect", -1.9924301646902063),
("ordinals (first..19st)TuesdayOctober", -1.9924301646902063),
("ordinals (first..19st)Wednesdayintersect",
-1.4816045409242156)],
n = 8},
koData =
ClassData{prior = -0.8472978603872037,
unseen = -2.9444389791664407,
likelihoods =
HashMap.fromList
[("daymonth", -0.9444616088408514),
("ordinals (first..19st)WednesdayOctober", -1.2809338454620642),
("ordinals (first..19st)TuesdaySeptember", -1.791759469228055)],
n = 6}}),
("the <day-of-month> (non ordinal)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("April",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("week (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.332204510175204,
likelihoods = HashMap.fromList [("", 0.0)], n = 26},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("now",
Classifier{okData =
ClassData{prior = -1.0986122886681098,
unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -0.40546510810816444,
unseen = -1.791759469228055,
likelihoods = HashMap.fromList [("", 0.0)], n = 4}}),
("this <part-of-day>",
Classifier{okData =
ClassData{prior = -2.1972245773362196,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("hour", -0.8472978603872037), ("evening", -1.252762968495368),
("morning", -1.252762968495368)],
n = 2},
koData =
ClassData{prior = -0.11778303565638351, unseen = -3.58351893845611,
likelihoods =
HashMap.fromList
[("hour", -0.7221347174331976),
("morning", -0.7221347174331976)],
n = 16}}),
("christmas eve",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<day-of-month>(ordinal) <named-month>",
Classifier{okData =
ClassData{prior = -9.53101798043249e-2,
unseen = -3.258096538021482,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)March", -1.8325814637483102),
("ordinal (digits)February", -1.8325814637483102),
("month", -0.8209805520698302),
("ordinal (digits)March", -1.6094379124341003)],
n = 10},
koData =
ClassData{prior = -2.3978952727983707,
unseen = -2.0794415416798357,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)April", -1.252762968495368),
("month", -1.252762968495368)],
n = 1}}),
("numbers prefix with -, negative or minus",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -3.4339872044851463,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 29}}),
("Friday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("in|during the <part-of-day>",
Classifier{okData =
ClassData{prior = -2.1972245773362196,
unseen = -2.1972245773362196,
likelihoods =
HashMap.fromList
[("afternoon", -1.3862943611198906),
("hour", -0.9808292530117262),
("evening", -1.3862943611198906)],
n = 2},
koData =
ClassData{prior = -0.11778303565638351,
unseen = -3.6109179126442243,
likelihoods =
HashMap.fromList
[("hour", -0.750305594399894), ("morning", -0.750305594399894)],
n = 16}}),
("new year's eve",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("tomorrow",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.044522437723423,
likelihoods = HashMap.fromList [("", 0.0)], n = 19},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<cycle> after <time>",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("year (grain)christmas eve", -0.6931471805599453),
("yearday", -0.6931471805599453)],
n = 2}}),
("<time> after next",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("Wednesday", -1.8718021769015913),
("Friday", -1.466337068793427), ("day", -1.1786549963416462),
("March", -1.8718021769015913), ("month", -1.8718021769015913)],
n = 4},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("half an hour",
Classifier{okData =
ClassData{prior = -0.5108256237659907,
unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -0.916290731874155, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2}}),
("the <day-of-month> (ordinal)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)", -1.2039728043259361),
("ordinal (digits)", -0.35667494393873245)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("fractional number",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods = HashMap.fromList [("", 0.0)], n = 8}}),
("Sunday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("afternoon",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<duration> from now",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("second", -1.4469189829363254), ("year", -2.1400661634962708),
("<integer> <unit-of-duration>", -1.041453874828161),
("a <unit-of-duration>", -2.1400661634962708),
("minute", -1.7346010553881064)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.791759469228055,
likelihoods = HashMap.fromList [], n = 0}}),
("February",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.4849066497880004,
likelihoods = HashMap.fromList [("", 0.0)], n = 10},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("this <cycle>",
Classifier{okData =
ClassData{prior = -9.53101798043249e-2,
unseen = -3.367295829986474,
likelihoods =
HashMap.fromList
[("week", -1.7227665977411035),
("year (grain)", -2.2335922215070942),
("week (grain)", -1.7227665977411035),
("day", -1.9459101490553135), ("quarter", -2.639057329615259),
("year", -2.2335922215070942),
("quarter (grain)", -2.639057329615259),
("day (grain)", -1.9459101490553135)],
n = 10},
koData =
ClassData{prior = -2.3978952727983707,
unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("day", -1.6094379124341003),
("day (grain)", -1.6094379124341003)],
n = 1}}),
("minute (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.833213344056216,
likelihoods = HashMap.fromList [("", 0.0)], n = 15},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("ordinals (first..19st)",
Classifier{okData =
ClassData{prior = -5.406722127027582e-2,
unseen = -2.995732273553991,
likelihoods = HashMap.fromList [("", 0.0)], n = 18},
koData =
ClassData{prior = -2.9444389791664407,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("about <time-of-day>",
Classifier{okData =
ClassData{prior = -0.6931471805599453,
unseen = -2.5649493574615367,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.8754687373538999),
("hour", -0.8754687373538999)],
n = 4},
koData =
ClassData{prior = -0.6931471805599453,
unseen = -2.5649493574615367,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -1.3862943611198906),
("hour", -1.3862943611198906),
("<hour-of-day> half (as relative minutes)",
-1.3862943611198906),
("minute", -1.3862943611198906)],
n = 4}}),
("time-of-day (latent)",
Classifier{okData =
ClassData{prior = -0.7105389232718146, unseen = -4.0943445622221,
likelihoods =
HashMap.fromList [("integer (numeric)", -1.7094433359300068e-2)],
n = 57},
koData =
ClassData{prior = -0.6760527472006452, unseen = -4.127134385045092,
likelihoods =
HashMap.fromList
[("integer (numeric)", -0.2607262624632527),
("integer (0..19)", -1.4718165345580525)],
n = 59}}),
("year",
Classifier{okData =
ClassData{prior = -0.2231435513142097, unseen = -3.258096538021482,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 24},
koData =
ClassData{prior = -1.6094379124341003,
unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("integer (numeric)", 0.0)],
n = 6}}),
("last <day-of-week> of <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("daymonth", -0.8266785731844679),
("SundayMarch", -1.6739764335716716),
("MondayMarch", -1.6739764335716716),
("Sundayintersect", -1.6739764335716716)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [], n = 0}}),
("<integer> <unit-of-duration>",
Classifier{okData =
ClassData{prior = -0.5408064559779265, unseen = -4.74493212836325,
likelihoods =
HashMap.fromList
[("week", -2.538973871058276),
("integer (0..19)year (grain)", -3.349904087274605),
("integer (numeric)day (grain)", -2.9444389791664407),
("integer (0..19)hour (grain)", -4.04305126783455),
("second", -2.9444389791664407),
("integer (numeric)second (grain)", -4.04305126783455),
("a pairhour (grain)", -4.04305126783455),
("integer (numeric)year (grain)", -3.6375861597263857),
("day", -2.338303175596125), ("year", -2.9444389791664407),
("integer (numeric)week (grain)", -3.349904087274605),
("integer (0..19)month (grain)", -4.04305126783455),
("integer (0..19)second (grain)", -3.126760535960395),
("hour", -2.9444389791664407), ("month", -3.6375861597263857),
("integer (numeric)minute (grain)", -2.790288299339182),
("integer (0..19)minute (grain)", -2.9444389791664407),
("integer (numeric)month (grain)", -4.04305126783455),
("minute", -2.2512917986064953),
("integer (numeric)hour (grain)", -3.349904087274605),
("integer (0..19)day (grain)", -2.9444389791664407),
("integer (0..19)week (grain)", -2.9444389791664407)],
n = 46},
koData =
ClassData{prior = -0.8729402910005413, unseen = -4.48863636973214,
likelihoods =
HashMap.fromList
[("week", -2.6855773452501515),
("integer (0..19)year (grain)", -3.378724525810097),
("integer (numeric)day (grain)", -3.0910424533583156),
("integer (0..19)hour (grain)", -3.784189633918261),
("second", -2.867898902044106),
("integer (numeric)second (grain)", -3.378724525810097),
("integer (numeric)year (grain)", -3.0910424533583156),
("day", -2.6855773452501515), ("year", -2.6855773452501515),
("integer (numeric)week (grain)", -3.378724525810097),
("integer (0..19)month (grain)", -3.0910424533583156),
("integer (0..19)second (grain)", -3.378724525810097),
("hour", -2.6855773452501515), ("month", -2.6855773452501515),
("integer (numeric)minute (grain)", -3.378724525810097),
("integer (0..19)minute (grain)", -3.378724525810097),
("integer (numeric)month (grain)", -3.378724525810097),
("minute", -2.867898902044106),
("integer (numeric)hour (grain)", -2.867898902044106),
("integer (0..19)day (grain)", -3.378724525810097),
("integer (0..19)week (grain)", -3.0910424533583156)],
n = 33}}),
("<duration> after <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3025850929940455,
likelihoods =
HashMap.fromList
[("a <unit-of-duration>christmas eve", -1.5040773967762742),
("yearday", -0.8109302162163288),
("<integer> <unit-of-duration>christmas eve",
-1.0986122886681098)],
n = 3},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("relative minutes after|past <integer> (hour-of-day)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("hour", -0.6931471805599453),
("integer (numeric)time-of-day (latent)", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("a <unit-of-duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.2188758248682006,
likelihoods =
HashMap.fromList
[("week", -2.0794415416798357),
("year (grain)", -2.4849066497880004),
("second", -2.0794415416798357),
("week (grain)", -2.0794415416798357),
("day", -2.4849066497880004),
("minute (grain)", -2.4849066497880004),
("year", -2.4849066497880004),
("second (grain)", -2.0794415416798357),
("minute", -2.4849066497880004),
("day (grain)", -2.4849066497880004)],
n = 7},
koData =
ClassData{prior = -infinity, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [], n = 0}}),
("intersect by \",\"",
Classifier{okData =
ClassData{prior = -9.763846956391606e-2,
unseen = -4.709530201312334,
likelihoods =
HashMap.fromList
[("Wednesday<named-month> <day-of-month> (non ordinal)",
-4.007333185232471),
("<named-month> <day-of-month> (non ordinal)Friday",
-4.007333185232471),
("Friday<named-month> <day-of-month> (non ordinal)",
-3.6018680771243066),
("at <time-of-day>Saturday", -3.6018680771243066),
("intersect by \",\"year", -4.007333185232471),
("hh:mmintersect by \",\"", -3.6018680771243066),
("dayday", -2.3025850929940455),
("dayyear", -3.0910424533583156),
("Saturday<day-of-month> (non ordinal) <named-month>",
-3.6018680771243066),
("<named-month> <day-of-month> (non ordinal)intersect",
-4.007333185232471),
("intersect by \",\"<day-of-month> (non ordinal) <named-month>",
-3.0910424533583156),
("Fridayintersect", -4.007333185232471),
("hh:mmintersect", -3.6018680771243066),
("intersect by \",\"intersect", -4.007333185232471),
("hh:mmSaturday", -3.6018680771243066),
("at <time-of-day>intersect", -3.6018680771243066),
("Sunday<day-of-month> (non ordinal) <named-month>",
-4.007333185232471),
("dayminute", -2.908720896564361),
("intersectyear", -4.007333185232471),
("minuteday", -1.6559579280689931),
("hh:mmabsorption of , after named day", -3.6018680771243066),
("at <time-of-day>intersect by \",\"", -3.6018680771243066),
("at <time-of-day>absorption of , after named day",
-3.6018680771243066),
("intersectintersect", -4.007333185232471),
("Fridayintersect by \",\"", -3.6018680771243066),
("Monday<named-month> <day-of-month> (non ordinal)",
-4.007333185232471),
("Monday<day-of-month> (non ordinal) <named-month>",
-4.007333185232471),
("<named-month> <day-of-month> (non ordinal)year",
-3.6018680771243066)],
n = 39},
koData =
ClassData{prior = -2.374905754573672, unseen = -3.713572066704308,
likelihoods =
HashMap.fromList
[("daymonth", -2.0794415416798357),
("FridayJuly", -2.5902671654458267),
("WednesdayFebruary", -2.995732273553991),
("MondayFebruary", -2.995732273553991)],
n = 4}}),
("hh:mm",
Classifier{okData =
ClassData{prior = -1.526747213078842e-2,
unseen = -4.204692619390966,
likelihoods = HashMap.fromList [("", 0.0)], n = 65},
koData =
ClassData{prior = -4.189654742026425, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("quarter after|past <integer> (hour-of-day)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("time-of-day (latent)", -0.6931471805599453),
("hour", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("second (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<time-of-day> sharp",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.772588722239781,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -1.3217558399823195),
("time-of-day (latent)", -1.3217558399823195),
("hour", -0.7621400520468967)],
n = 6},
koData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0}}),
("intersect by \"of\", \"from\", \"'s\"",
Classifier{okData =
ClassData{prior = -0.8209805520698302,
unseen = -3.6375861597263857,
likelihoods =
HashMap.fromList
[("Sundaylast <cycle>", -2.917770732084279),
("daymonth", -1.6650077635889111),
("Wednesdayintersect", -2.512305623976115),
("Wednesdaynext <cycle>", -2.917770732084279),
("Tuesdaythis <cycle>", -2.917770732084279),
("WednesdayOctober", -2.512305623976115),
("Wednesdaythis <cycle>", -2.917770732084279),
("TuesdayOctober", -2.512305623976115),
("Mondaythis <cycle>", -2.917770732084279),
("dayweek", -1.8191584434161694)],
n = 11},
koData =
ClassData{prior = -0.579818495252942, unseen = -3.784189633918261,
likelihoods =
HashMap.fromList
[("daymonth", -1.0531499145913523),
("TuesdaySeptember", -2.662587827025453),
("Wednesdayintersect", -2.662587827025453),
("WednesdayOctober", -2.662587827025453),
("SundayMarch", -2.662587827025453),
("MondayMarch", -2.662587827025453),
("Tuesdayintersect", -2.662587827025453),
("Sundayintersect", -2.662587827025453)],
n = 14}}),
("<duration> ago",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.5553480614894135,
likelihoods =
HashMap.fromList
[("week", -1.580450375560848), ("day", -1.916922612182061),
("year", -2.4277482359480516),
("<integer> <unit-of-duration>", -0.8873031950009028),
("a <unit-of-duration>", -2.833213344056216),
("month", -2.4277482359480516)],
n = 14},
koData =
ClassData{prior = -infinity, unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [], n = 0}}),
("last <time>",
Classifier{okData =
ClassData{prior = -2.751535313041949, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("day", -1.6739764335716716), ("Sunday", -2.0794415416798357),
("hour", -2.0794415416798357), ("Tuesday", -2.0794415416798357),
("week-end", -2.0794415416798357)],
n = 3},
koData =
ClassData{prior = -6.59579677917974e-2, unseen = -4.59511985013459,
likelihoods =
HashMap.fromList
[("year (latent)", -1.6405284995041316),
("Monday", -3.4863551900024623), ("day", -2.3877429013343523),
("Sunday", -3.4863551900024623),
("time-of-day (latent)", -1.6405284995041316),
("year", -1.6405284995041316),
("intersect by \"of\", \"from\", \"'s\"", -2.9755295662364714),
("hour", -1.6405284995041316)],
n = 44}}),
("March",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.833213344056216,
likelihoods = HashMap.fromList [("", 0.0)], n = 15},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("the day after tomorrow",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("noon",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("until <time-of-day>",
Classifier{okData =
ClassData{prior = -0.1823215567939546, unseen = -3.258096538021482,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -0.8209805520698302),
("hour", -0.8209805520698302)],
n = 10},
koData =
ClassData{prior = -1.791759469228055, unseen = -2.3025850929940455,
likelihoods =
HashMap.fromList
[("intersect", -1.5040773967762742),
("hh:mm", -1.5040773967762742),
("minute", -1.0986122886681098)],
n = 2}}),
("<integer> and an half hours",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods =
HashMap.fromList
[("integer (numeric)", -0.6931471805599453),
("integer (0..19)", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("after <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("day", -0.6931471805599453),
("<integer> <unit-of-duration>", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("evening",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("decimal number",
Classifier{okData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("next <time>",
Classifier{okData =
ClassData{prior = -0.587786664902119, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("Wednesday", -2.1400661634962708),
("Monday", -2.1400661634962708), ("day", -1.2237754316221157),
("March", -2.1400661634962708), ("month", -2.1400661634962708),
("Tuesday", -1.7346010553881064)],
n = 5},
koData =
ClassData{prior = -0.8109302162163288, unseen = -2.772588722239781,
likelihoods =
HashMap.fromList
[("Wednesday", -2.0149030205422647),
("Friday", -1.6094379124341003), ("day", -1.3217558399823195),
("March", -2.0149030205422647), ("month", -2.0149030205422647)],
n = 4}}),
("last <cycle>",
Classifier{okData =
ClassData{prior = -0.6931471805599453, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("week", -1.6739764335716716),
("month (grain)", -2.0794415416798357),
("year (grain)", -2.0794415416798357),
("week (grain)", -1.6739764335716716),
("year", -2.0794415416798357), ("month", -2.0794415416798357)],
n = 4},
koData =
ClassData{prior = -0.6931471805599453, unseen = -2.833213344056216,
likelihoods =
HashMap.fromList
[("week", -1.6739764335716716),
("week (grain)", -1.6739764335716716),
("day", -1.6739764335716716),
("day (grain)", -1.6739764335716716)],
n = 4}}),
("christmas",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("new year's day",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("next n <cycle>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.912023005428146,
likelihoods =
HashMap.fromList
[("week", -2.793208009442517),
("integer (0..19)year (grain)", -3.1986731175506815),
("integer (numeric)day (grain)", -3.1986731175506815),
("integer (0..19)hour (grain)", -3.1986731175506815),
("second", -2.793208009442517),
("integer (numeric)second (grain)", -3.1986731175506815),
("integer (numeric)year (grain)", -3.1986731175506815),
("day", -2.793208009442517), ("year", -2.793208009442517),
("integer (numeric)week (grain)", -3.1986731175506815),
("integer (0..19)month (grain)", -3.1986731175506815),
("integer (0..19)second (grain)", -3.1986731175506815),
("hour", -2.793208009442517), ("month", -2.793208009442517),
("integer (numeric)minute (grain)", -3.1986731175506815),
("integer (0..19)minute (grain)", -3.1986731175506815),
("integer (numeric)month (grain)", -3.1986731175506815),
("minute", -2.793208009442517),
("integer (numeric)hour (grain)", -3.1986731175506815),
("integer (0..19)day (grain)", -3.1986731175506815),
("integer (0..19)week (grain)", -3.1986731175506815)],
n = 14},
koData =
ClassData{prior = -infinity, unseen = -3.0910424533583156,
likelihoods = HashMap.fromList [], n = 0}}),
("in <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -4.418840607796598,
likelihoods =
HashMap.fromList
[("week", -3.0204248861443626),
("<integer> more <unit-of-duration>", -3.3081069585961433),
("number.number hours", -3.713572066704308),
("second", -2.797281334830153), ("day", -2.6149597780361984),
("half an hour", -3.713572066704308),
("<integer> <unit-of-duration>", -1.2286654169163076),
("a <unit-of-duration>", -2.797281334830153),
("<integer> and an half hours", -3.3081069585961433),
("hour", -2.6149597780361984), ("minute", -1.4622802680978126),
("about <duration>", -3.3081069585961433)],
n = 35},
koData =
ClassData{prior = -infinity, unseen = -2.5649493574615367,
likelihoods = HashMap.fromList [], n = 0}}),
("<datetime> - <datetime> (interval)",
Classifier{okData =
ClassData{prior = -0.5436154465889815,
unseen = -3.8501476017100584,
likelihoods =
HashMap.fromList
[("minuteminute", -1.1895840668738362),
("hh:mmhh:mm", -1.1895840668738362),
("dayday", -2.03688192726104),
("<day-of-month> (non ordinal) <named-month><day-of-month> (non ordinal) <named-month>",
-2.03688192726104)],
n = 18},
koData =
ClassData{prior = -0.8690378470236094,
unseen = -3.6109179126442243,
likelihoods =
HashMap.fromList
[("July<day-of-month> (non ordinal) <named-month>",
-2.890371757896165),
("monthday", -1.791759469228055),
("minuteminute", -1.6376087894007967),
("hh:mmhh:mm", -2.890371757896165),
("dayyear", -2.4849066497880004),
("hh:mmintersect", -1.791759469228055),
("August<day-of-month> (non ordinal) <named-month>",
-1.9740810260220096),
("dd/mmyear", -2.4849066497880004)],
n = 13}}),
("Tuesday",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("<time-of-day> - <time-of-day> (interval)",
Classifier{okData =
ClassData{prior = -0.6241543090729939,
unseen = -3.5553480614894135,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -2.4277482359480516),
("minuteminute", -0.8873031950009028),
("hh:mmhh:mm", -0.8873031950009028),
("minutehour", -2.4277482359480516)],
n = 15},
koData =
ClassData{prior = -0.7672551527136672,
unseen = -3.4339872044851463,
likelihoods =
HashMap.fromList
[("hh:mmtime-of-day (latent)", -0.8362480242006186),
("minuteminute", -2.70805020110221),
("hh:mmhh:mm", -2.70805020110221),
("minutehour", -0.8362480242006186)],
n = 13}}),
("last n <cycle>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -4.04305126783455,
likelihoods =
HashMap.fromList
[("week", -2.639057329615259),
("integer (0..19)year (grain)", -3.332204510175204),
("integer (numeric)day (grain)", -2.9267394020670396),
("second", -2.9267394020670396),
("integer (numeric)second (grain)", -3.332204510175204),
("integer (numeric)year (grain)", -2.9267394020670396),
("day", -2.639057329615259), ("year", -2.639057329615259),
("integer (numeric)week (grain)", -3.332204510175204),
("integer (0..19)month (grain)", -2.9267394020670396),
("integer (0..19)second (grain)", -3.332204510175204),
("hour", -2.9267394020670396), ("month", -2.639057329615259),
("integer (numeric)minute (grain)", -3.332204510175204),
("integer (0..19)minute (grain)", -3.332204510175204),
("integer (numeric)month (grain)", -3.332204510175204),
("minute", -2.9267394020670396),
("integer (numeric)hour (grain)", -2.9267394020670396),
("integer (0..19)day (grain)", -3.332204510175204),
("integer (0..19)week (grain)", -2.9267394020670396)],
n = 18},
koData =
ClassData{prior = -infinity, unseen = -3.044522437723423,
likelihoods = HashMap.fromList [], n = 0}}),
("<named-month> <day-of-month> (non ordinal)",
Classifier{okData =
ClassData{prior = -0.11778303565638351,
unseen = -3.044522437723423,
likelihoods =
HashMap.fromList
[("Aprilinteger (numeric)", -2.3025850929940455),
("month", -0.7985076962177716),
("Februaryinteger (numeric)", -1.3862943611198906),
("Julyinteger (numeric)", -1.6094379124341003)],
n = 8},
koData =
ClassData{prior = -2.1972245773362196,
unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("Aprilinteger (numeric)", -1.0986122886681098),
("month", -1.0986122886681098)],
n = 1}}),
("<day-of-month> (non ordinal) <named-month>",
Classifier{okData =
ClassData{prior = -0.1466034741918754, unseen = -3.828641396489095,
likelihoods =
HashMap.fromList
[("integer (numeric)September", -2.70805020110221),
("integer (numeric)April", -3.1135153092103742),
("integer (numeric)August", -1.5040773967762742),
("integer (numeric)February", -2.4203681286504293),
("month", -0.8109302162163288),
("integer (numeric)July", -2.70805020110221),
("integer (numeric)March", -2.70805020110221)],
n = 19},
koData =
ClassData{prior = -1.9924301646902063, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("month", -1.1786549963416462),
("integer (numeric)July", -1.1786549963416462)],
n = 3}}),
("this|next <day-of-week>",
Classifier{okData =
ClassData{prior = -0.5596157879354228, unseen = -2.639057329615259,
likelihoods =
HashMap.fromList
[("Wednesday", -1.8718021769015913),
("Monday", -1.8718021769015913), ("day", -0.9555114450274363),
("Tuesday", -1.466337068793427)],
n = 4},
koData =
ClassData{prior = -0.8472978603872037,
unseen = -2.4849066497880004,
likelihoods =
HashMap.fromList
[("Wednesday", -1.7047480922384253),
("Friday", -1.2992829841302609), ("day", -1.0116009116784799)],
n = 3}}),
("<hour-of-day> half (as relative minutes)",
Classifier{okData =
ClassData{prior = -infinity, unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [], n = 0},
koData =
ClassData{prior = 0.0, unseen = -2.890371757896165,
likelihoods =
HashMap.fromList
[("about <time-of-day>", -1.7346010553881064),
("time-of-day (latent)", -1.041453874828161),
("hour", -0.7537718023763802)],
n = 7}}),
("ordinal (digits)",
Classifier{okData =
ClassData{prior = -8.701137698962981e-2,
unseen = -2.5649493574615367,
likelihoods = HashMap.fromList [("", 0.0)], n = 11},
koData =
ClassData{prior = -2.4849066497880004,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("quarter (grain)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.0794415416798357,
likelihoods = HashMap.fromList [("", 0.0)], n = 6},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("last <cycle> of <time>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -3.1354942159291497,
likelihoods =
HashMap.fromList
[("day (grain)October", -1.9924301646902063),
("daymonth", -1.4816045409242156),
("day (grain)intersect", -1.9924301646902063),
("weekmonth", -1.4816045409242156),
("week (grain)intersect", -1.9924301646902063),
("week (grain)September", -1.9924301646902063)],
n = 8},
koData =
ClassData{prior = -infinity, unseen = -1.9459101490553135,
likelihoods = HashMap.fromList [], n = 0}}),
("<day-of-month>(ordinal) <named-month> year",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods =
HashMap.fromList
[("ordinals (first..19st)April", -1.6094379124341003),
("ordinals (first..19st)March", -1.6094379124341003),
("month", -0.916290731874155),
("ordinal (digits)March", -1.6094379124341003)],
n = 3},
koData =
ClassData{prior = -infinity, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [], n = 0}}),
("morning",
Classifier{okData =
ClassData{prior = -2.1972245773362196,
unseen = -1.3862943611198906,
likelihoods = HashMap.fromList [("", 0.0)], n = 2},
koData =
ClassData{prior = -0.11778303565638351,
unseen = -2.890371757896165,
likelihoods = HashMap.fromList [("", 0.0)], n = 16}}),
("week-end",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods = HashMap.fromList [("", 0.0)], n = 3},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}}),
("after <time-of-day>",
Classifier{okData =
ClassData{prior = -0.6931471805599453, unseen = -3.58351893845611,
likelihoods =
HashMap.fromList
[("at <time-of-day>", -1.6094379124341003),
("intersect", -2.169053700369523),
("tomorrow", -2.169053700369523), ("day", -2.169053700369523),
("hour", -1.252762968495368)],
n = 12},
koData =
ClassData{prior = -0.6931471805599453, unseen = -3.58351893845611,
likelihoods =
HashMap.fromList
[("this <part-of-day>", -2.169053700369523),
("christmas eve", -2.456735772821304),
("in|during the <part-of-day>", -2.169053700369523),
("day", -2.456735772821304), ("hh:mm", -2.8622008809294686),
("hour", -1.252762968495368), ("minute", -2.8622008809294686),
("this <time>", -2.169053700369523)],
n = 12}}),
("day (grain)",
Classifier{okData =
ClassData{prior = -4.445176257083381e-2,
unseen = -3.1780538303479458,
likelihoods = HashMap.fromList [("", 0.0)], n = 22},
koData =
ClassData{prior = -3.1354942159291497,
unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [("", 0.0)], n = 1}}),
("<month> dd-dd (interval)",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.1972245773362196,
likelihoods =
HashMap.fromList
[("July", -0.6931471805599453), ("month", -0.6931471805599453)],
n = 3},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("about <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.9459101490553135,
likelihoods =
HashMap.fromList
[("half an hour", -0.6931471805599453),
("minute", -0.6931471805599453)],
n = 2},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("this <time>",
Classifier{okData =
ClassData{prior = -1.3862943611198906, unseen = -3.871201010907891,
likelihoods =
HashMap.fromList
[("week", -2.2407096892759584), ("October", -2.751535313041949),
("intersect", -2.751535313041949),
("season", -2.2407096892759584),
("next <cycle>", -3.1570004211501135),
("day", -2.2407096892759584),
("this <cycle>", -2.463853240590168),
("hour", -2.2407096892759584), ("evening", -3.1570004211501135),
("month", -2.2407096892759584),
("morning", -3.1570004211501135),
("week-end", -2.751535313041949)],
n = 16},
koData =
ClassData{prior = -0.2876820724517809, unseen = -4.718498871295094,
likelihoods =
HashMap.fromList
[("September", -3.100092288878234),
("October", -2.3116349285139637),
("intersect", -2.1445808438507976),
("time-of-day (latent)", -3.6109179126442243),
("March", -3.100092288878234), ("hour", -1.7650912221458936),
("month", -1.2755429968271879),
("morning", -1.8763168572561182)],
n = 48}}),
("within <duration>",
Classifier{okData =
ClassData{prior = 0.0, unseen = -1.6094379124341003,
likelihoods =
HashMap.fromList
[("week", -0.6931471805599453),
("<integer> <unit-of-duration>", -0.6931471805599453)],
n = 1},
koData =
ClassData{prior = -infinity, unseen = -1.0986122886681098,
likelihoods = HashMap.fromList [], n = 0}}),
("August",
Classifier{okData =
ClassData{prior = 0.0, unseen = -2.3978952727983707,
likelihoods = HashMap.fromList [("", 0.0)], n = 9},
koData =
ClassData{prior = -infinity, unseen = -0.6931471805599453,
likelihoods = HashMap.fromList [], n = 0}})] |
fd8a44934825e5819a0fa33062d1aff1de6d03f233c3cdd38eb3001fd417dc92 | ijvcms/chuanqi_dev | skill_pp.erl | %%%-------------------------------------------------------------------
@author zhengsiying
( C ) 2015 , < COMPANY >
%%% @doc
%%%
%%% @end
Created : 19 . 八月 2015 下午3:21
%%%-------------------------------------------------------------------
-module(skill_pp).
-include("common.hrl").
-include("record.hrl").
-include("proto.hrl").
-include("cache.hrl").
-include("language_config.hrl").
%% API
-export([
handle/3
]).
-define(MAX_ATK_NUM, 2).
%% ====================================================================
%% API functions
%% ====================================================================
%% 获取玩家已学技能列表
handle(12001, PlayerState, _) ->
?INFO("recv 12001 =========", []),
SkillDict = PlayerState#player_state.skill_dict,
F = fun(_K, V, Acc) ->
ProtoSkill = #proto_skill{
skill_id = V#db_skill.skill_id,
lv = V#db_skill.lv,
exp = V#db_skill.exp,
pos = V#db_skill.pos,
auto_set = V#db_skill.auto_set
},
[ProtoSkill | Acc]
end,
SkillList = dict:fold(F, [], SkillDict),
net_send:send_to_client(PlayerState#player_state.socket, 12001, #rep_skill_list{skill_list = SkillList});
释放技能
handle(12002, PlayerState, Data) ->
?INFO("12002 ~p", [Data]),
case PlayerState#player_state.atk_num > ?MAX_ATK_NUM of
true ->
?ERR("111KK ~p", [22]),
%% player_lib:kick_player(PlayerState, ?ERR_ACCOUNT_ERR);
Base = PlayerState#player_state.db_player_base,
player_id_name_lib:add_user_plug(PlayerState#player_state.player_id, PlayerState#player_state.open_id, Base#db_player_base.name, Base#db_player_base.vip, PlayerState#player_state.atk_num);
_ ->
PlayerState1 = PlayerState#player_state{
atk_num = PlayerState#player_state.atk_num + 1
},
CurTime = util_date:unixtime(),
PlayerState2 = case PlayerState1#player_state.atk_time < CurTime of
true ->
PlayerState1#player_state{
atk_num = 0,
atk_time = CurTime
};
_ ->
PlayerState1
end,
%% ?INFO("12002 ~p", [Data]),
case skill_base_lib:start_use_skill(PlayerState2, Data) of
{ok, PlayerState3} ->
{ok, PlayerState3};
{fail, Err} ->
Data1 = #rep_start_use_skill{
result = Err
},
%% ?ERR("~p", [Err]),
net_send:send_to_client(PlayerState2#player_state.socket, 12002, Data1);
_DD ->
?ERR("SKIPP ~p", [_DD]),
skip
end
end;
升级与学习技能
handle(12004, PlayerState, #req_upgrade_skill{skill_id = SkillId, lv = SkillLv}) ->
case skill_tree_lib:learn_skill(PlayerState, SkillId, SkillLv) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12004, #rep_upgrade_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12004, #rep_upgrade_skill{result = Result})
end;
%% "技能设置快捷键"
handle(12005, PlayerState, #req_set_pos{skill_id = SkillId, pos = Pos}) ->
case skill_tree_lib:set_skill_keyboard(PlayerState, SkillId, Pos) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12005, #rep_set_pos{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12005, #rep_set_pos{result = Result})
end;
%% 清空快捷键
handle(12006, PlayerState, #req_clear_pos{skill_id = SkillId}) ->
case skill_tree_lib:clear_skill_keyboard(PlayerState, SkillId) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12006, #rep_clear_pos{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12006, #rep_clear_pos{result = Result})
end;
%% 激活自动技能
handle(12007, PlayerState, #req_active_auto_skill{skill_id = SkillId, switch = Switch}) ->
case skill_tree_lib:active_skill_auto_set(PlayerState, SkillId, Switch) of
{ok, PlayerState1} ->
case SkillId of
30600 when Switch == 1 ->
case skill_tree_lib:active_skill_auto_set(PlayerState1, 30800, 0) of
{ok, PlayerState2} ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState2};
_ ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1}
end;
30800 when Switch == 1 ->
case skill_tree_lib:active_skill_auto_set(PlayerState1, 30600, 0) of
{ok, PlayerState2} ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState2};
_ ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1}
end;
_ ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1}
end;
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = Result})
end;
%% 设置与获取群体技能开关
handle(12008, PlayerState, #req_set_group_switch{type = Type}) ->
DbBase = PlayerState#player_state.db_player_base,
case Type of
0 ->
Update = #player_state{db_player_base = #db_player_base{skill_set = 0}},
net_send:send_to_client(PlayerState#player_state.socket, 12008, #rep_set_group_switch{type = 0}),
player_lib:update_player_state(PlayerState, Update);
1 ->
Update = #player_state{db_player_base = #db_player_base{skill_set = 1}},
net_send:send_to_client(PlayerState#player_state.socket, 12008, #rep_set_group_switch{type = 1}),
player_lib:update_player_state(PlayerState, Update);
_ ->
Set = DbBase#db_player_base.skill_set,
net_send:send_to_client(PlayerState#player_state.socket, 12008, #rep_set_group_switch{type = Set})
end;
%% 增加技能熟练度
handle(12009, PlayerState, #req_add_skill_exp{skill_id = SkillId, goods_id = GoodsId, num = Num}) ->
case skill_tree_lib:upgrade_skill(PlayerState, SkillId, GoodsId, Num) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12009, #rep_add_skill_exp{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12009, #rep_add_skill_exp{result = Result})
end;
%% 触发技能效果
handle(12010, PlayerState, _Data) ->
%% ?INFO("12010 ~p", [_Data]),
skill_base_lib:trigger_skill(PlayerState);
handle(Cmd, PlayerState, Data) ->
?ERR("not define ~p cmd:~nstate: ~p~ndata: ~p", [Cmd, PlayerState, Data]),
{ok, PlayerState}.
%% ====================================================================
Internal functions
%% ====================================================================
| null | https://raw.githubusercontent.com/ijvcms/chuanqi_dev/7742184bded15f25be761c4f2d78834249d78097/server/trunk/server/src/business/skill/skill_pp.erl | erlang | -------------------------------------------------------------------
@doc
@end
-------------------------------------------------------------------
API
====================================================================
API functions
====================================================================
获取玩家已学技能列表
player_lib:kick_player(PlayerState, ?ERR_ACCOUNT_ERR);
?INFO("12002 ~p", [Data]),
?ERR("~p", [Err]),
"技能设置快捷键"
清空快捷键
激活自动技能
设置与获取群体技能开关
增加技能熟练度
触发技能效果
?INFO("12010 ~p", [_Data]),
====================================================================
==================================================================== | @author zhengsiying
( C ) 2015 , < COMPANY >
Created : 19 . 八月 2015 下午3:21
-module(skill_pp).
-include("common.hrl").
-include("record.hrl").
-include("proto.hrl").
-include("cache.hrl").
-include("language_config.hrl").
-export([
handle/3
]).
-define(MAX_ATK_NUM, 2).
handle(12001, PlayerState, _) ->
?INFO("recv 12001 =========", []),
SkillDict = PlayerState#player_state.skill_dict,
F = fun(_K, V, Acc) ->
ProtoSkill = #proto_skill{
skill_id = V#db_skill.skill_id,
lv = V#db_skill.lv,
exp = V#db_skill.exp,
pos = V#db_skill.pos,
auto_set = V#db_skill.auto_set
},
[ProtoSkill | Acc]
end,
SkillList = dict:fold(F, [], SkillDict),
net_send:send_to_client(PlayerState#player_state.socket, 12001, #rep_skill_list{skill_list = SkillList});
释放技能
handle(12002, PlayerState, Data) ->
?INFO("12002 ~p", [Data]),
case PlayerState#player_state.atk_num > ?MAX_ATK_NUM of
true ->
?ERR("111KK ~p", [22]),
Base = PlayerState#player_state.db_player_base,
player_id_name_lib:add_user_plug(PlayerState#player_state.player_id, PlayerState#player_state.open_id, Base#db_player_base.name, Base#db_player_base.vip, PlayerState#player_state.atk_num);
_ ->
PlayerState1 = PlayerState#player_state{
atk_num = PlayerState#player_state.atk_num + 1
},
CurTime = util_date:unixtime(),
PlayerState2 = case PlayerState1#player_state.atk_time < CurTime of
true ->
PlayerState1#player_state{
atk_num = 0,
atk_time = CurTime
};
_ ->
PlayerState1
end,
case skill_base_lib:start_use_skill(PlayerState2, Data) of
{ok, PlayerState3} ->
{ok, PlayerState3};
{fail, Err} ->
Data1 = #rep_start_use_skill{
result = Err
},
net_send:send_to_client(PlayerState2#player_state.socket, 12002, Data1);
_DD ->
?ERR("SKIPP ~p", [_DD]),
skip
end
end;
升级与学习技能
handle(12004, PlayerState, #req_upgrade_skill{skill_id = SkillId, lv = SkillLv}) ->
case skill_tree_lib:learn_skill(PlayerState, SkillId, SkillLv) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12004, #rep_upgrade_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12004, #rep_upgrade_skill{result = Result})
end;
handle(12005, PlayerState, #req_set_pos{skill_id = SkillId, pos = Pos}) ->
case skill_tree_lib:set_skill_keyboard(PlayerState, SkillId, Pos) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12005, #rep_set_pos{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12005, #rep_set_pos{result = Result})
end;
handle(12006, PlayerState, #req_clear_pos{skill_id = SkillId}) ->
case skill_tree_lib:clear_skill_keyboard(PlayerState, SkillId) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12006, #rep_clear_pos{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12006, #rep_clear_pos{result = Result})
end;
handle(12007, PlayerState, #req_active_auto_skill{skill_id = SkillId, switch = Switch}) ->
case skill_tree_lib:active_skill_auto_set(PlayerState, SkillId, Switch) of
{ok, PlayerState1} ->
case SkillId of
30600 when Switch == 1 ->
case skill_tree_lib:active_skill_auto_set(PlayerState1, 30800, 0) of
{ok, PlayerState2} ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState2};
_ ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1}
end;
30800 when Switch == 1 ->
case skill_tree_lib:active_skill_auto_set(PlayerState1, 30600, 0) of
{ok, PlayerState2} ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState2};
_ ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1}
end;
_ ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1}
end;
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12007, #rep_active_auto_skill{result = Result})
end;
handle(12008, PlayerState, #req_set_group_switch{type = Type}) ->
DbBase = PlayerState#player_state.db_player_base,
case Type of
0 ->
Update = #player_state{db_player_base = #db_player_base{skill_set = 0}},
net_send:send_to_client(PlayerState#player_state.socket, 12008, #rep_set_group_switch{type = 0}),
player_lib:update_player_state(PlayerState, Update);
1 ->
Update = #player_state{db_player_base = #db_player_base{skill_set = 1}},
net_send:send_to_client(PlayerState#player_state.socket, 12008, #rep_set_group_switch{type = 1}),
player_lib:update_player_state(PlayerState, Update);
_ ->
Set = DbBase#db_player_base.skill_set,
net_send:send_to_client(PlayerState#player_state.socket, 12008, #rep_set_group_switch{type = Set})
end;
handle(12009, PlayerState, #req_add_skill_exp{skill_id = SkillId, goods_id = GoodsId, num = Num}) ->
case skill_tree_lib:upgrade_skill(PlayerState, SkillId, GoodsId, Num) of
{ok, PlayerState1} ->
net_send:send_to_client(PlayerState#player_state.socket, 12009, #rep_add_skill_exp{result = ?ERR_COMMON_SUCCESS}),
{ok, PlayerState1};
{fail, Result} ->
net_send:send_to_client(PlayerState#player_state.socket, 12009, #rep_add_skill_exp{result = Result})
end;
handle(12010, PlayerState, _Data) ->
skill_base_lib:trigger_skill(PlayerState);
handle(Cmd, PlayerState, Data) ->
?ERR("not define ~p cmd:~nstate: ~p~ndata: ~p", [Cmd, PlayerState, Data]),
{ok, PlayerState}.
Internal functions
|
aace5ce8d325f95d2a0ae6b9b407aa490122c604274f6514fccbe5daa5ae7bb3 | public-law/nevada-revised-statutes-parser | Chapter.hs | module Models.Chapter where
import BasicPrelude ( Show
, Text
)
import Data.Aeson ( ToJSON )
import GHC.Generics ( Generic )
import Models.SubChapter
import Models.Section
data Chapter =
Chapter {
name :: Text,
number :: Text,
url :: Text,
content :: ChapterContent
} deriving (Generic, Show)
data ChapterContent = SimpleChapterContent [Section]
| ComplexChapterContent [SubChapter]
deriving (Generic, Show)
instance ToJSON ChapterContent
instance ToJSON Chapter
| null | https://raw.githubusercontent.com/public-law/nevada-revised-statutes-parser/88a886debdab6ce5bd6cf5819c846cec1ffb9220/src/Models/Chapter.hs | haskell | module Models.Chapter where
import BasicPrelude ( Show
, Text
)
import Data.Aeson ( ToJSON )
import GHC.Generics ( Generic )
import Models.SubChapter
import Models.Section
data Chapter =
Chapter {
name :: Text,
number :: Text,
url :: Text,
content :: ChapterContent
} deriving (Generic, Show)
data ChapterContent = SimpleChapterContent [Section]
| ComplexChapterContent [SubChapter]
deriving (Generic, Show)
instance ToJSON ChapterContent
instance ToJSON Chapter
| |
ff73308c7fd100685d0daa630ca9b80a4a27918d15772a4f0e7cae27c21adbcc | emanjavacas/cosycat | annotation_schemas.cljc | (ns cosycat.schemas.annotation-schemas
(:require [schema.core :as s]
[schema.coerce :as coerce]
[taoensso.timbre :as timbre]))
(def cpos-schema s/Int)
(def token-id-schema s/Any)
(def ann-key-schema s/Str)
(def token-span-schema
{:type (s/enum "token")
:scope cpos-schema
(s/optional-key :doc) s/Any})
(def iob-span-schema
{:type (s/enum "IOB")
:scope {:B cpos-schema :O cpos-schema}
(s/optional-key :doc) s/Any})
(def span-schema
(s/conditional #(= (:type %) "token") token-span-schema
#(= (:type %) "IOB") iob-span-schema))
(def history-schema
[{:ann {:key ann-key-schema :value s/Str}
:username s/Str
:timestamp s/Int
:corpus s/Str
(s/optional-key :query) s/Str
:hit-id s/Any
:_version s/Int
:span span-schema}])
(def annotation-schema
{:ann {:key s/Str :value s/Str}
#?(:clj :username :cljs (s/optional-key :username)) s/Str ;; we don't always send username along
:timestamp s/Int
:span span-schema
:corpus s/Str
(s/optional-key :query) s/Str ;; review annotations don't have query
:hit-id s/Any
#?(:clj :_id :cljs (s/optional-key :_id)) s/Any ;; outgoing annotations do not have an id yet
#?(:clj :_version :cljs (s/optional-key :_version)) s/Any
(s/optional-key :history) history-schema}) ;; this is the same except history and _id
| null | https://raw.githubusercontent.com/emanjavacas/cosycat/a7186363d3c0bdc7b714af126feb565f98793a6e/src/cljc/cosycat/schemas/annotation_schemas.cljc | clojure | we don't always send username along
review annotations don't have query
outgoing annotations do not have an id yet
this is the same except history and _id | (ns cosycat.schemas.annotation-schemas
(:require [schema.core :as s]
[schema.coerce :as coerce]
[taoensso.timbre :as timbre]))
(def cpos-schema s/Int)
(def token-id-schema s/Any)
(def ann-key-schema s/Str)
(def token-span-schema
{:type (s/enum "token")
:scope cpos-schema
(s/optional-key :doc) s/Any})
(def iob-span-schema
{:type (s/enum "IOB")
:scope {:B cpos-schema :O cpos-schema}
(s/optional-key :doc) s/Any})
(def span-schema
(s/conditional #(= (:type %) "token") token-span-schema
#(= (:type %) "IOB") iob-span-schema))
(def history-schema
[{:ann {:key ann-key-schema :value s/Str}
:username s/Str
:timestamp s/Int
:corpus s/Str
(s/optional-key :query) s/Str
:hit-id s/Any
:_version s/Int
:span span-schema}])
(def annotation-schema
{:ann {:key s/Str :value s/Str}
:timestamp s/Int
:span span-schema
:corpus s/Str
:hit-id s/Any
#?(:clj :_version :cljs (s/optional-key :_version)) s/Any
|
ddc3667a48fac3c7a4cc0324c003905b478bd4125e9ab3cee8669cf1d85378d5 | FranklinChen/hugs98-plus-Sep2006 | IORef.hs | module IORef
{-# DEPRECATED "This module has moved to Data.IORef" #-}
(module Data.IORef) where
import Data.IORef
| null | https://raw.githubusercontent.com/FranklinChen/hugs98-plus-Sep2006/54ab69bd6313adbbed1d790b46aca2a0305ea67e/fptools/hslibs/lang/IORef.hs | haskell | # DEPRECATED "This module has moved to Data.IORef" # | module IORef
(module Data.IORef) where
import Data.IORef
|
87e5212d86bcc542a2360fb14bab21300dee00b2276e894ec03d0001076eeffa | DavidAlphaFox/RabbitMQ | mochiweb.erl | @author < >
2007 Mochi Media , Inc.
@doc Start and stop the MochiWeb server .
-module(mochiweb).
-author('').
-export([new_request/1, new_response/1]).
-export([all_loaded/0, all_loaded/1, reload/0]).
-export([ensure_started/1]).
reload() ->
[c:l(Module) || Module <- all_loaded()].
all_loaded() ->
all_loaded(filename:dirname(code:which(?MODULE))).
all_loaded(Base) when is_atom(Base) ->
[];
all_loaded(Base) ->
FullBase = Base ++ "/",
F = fun ({_Module, Loaded}, Acc) when is_atom(Loaded) ->
Acc;
({Module, Loaded}, Acc) ->
case lists:prefix(FullBase, Loaded) of
true ->
[Module | Acc];
false ->
Acc
end
end,
lists:foldl(F, [], code:all_loaded()).
, Request , Headers } ) - > MochiWebRequest
%% @doc Return a mochiweb_request data structure.
new_request({Socket, {Method, {abs_path, Uri}, Version}, Headers}) ->
mochiweb_request:new(Socket,
Method,
Uri,
Version,
mochiweb_headers:make(Headers));
% this case probably doesn't "exist".
new_request({Socket, {Method, {absoluteURI, _Protocol, _Host, _Port, Uri},
Version}, Headers}) ->
mochiweb_request:new(Socket,
Method,
Uri,
Version,
mochiweb_headers:make(Headers));
%% Request-URI is "*"
From -sec5.html#sec5.1.2
new_request({Socket, {Method, '*'=Uri, Version}, Headers}) ->
mochiweb_request:new(Socket,
Method,
Uri,
Version,
mochiweb_headers:make(Headers)).
@spec new_response({Request , integer ( ) , Headers } ) - > MochiWebResponse
%% @doc Return a mochiweb_response data structure.
new_response({Request, Code, Headers}) ->
mochiweb_response:new(Request,
Code,
mochiweb_headers:make(Headers)).
ensure_started(App::atom ( ) ) - > ok
%% @doc Start the given App if it has not been started already.
ensure_started(App) ->
case application:start(App) of
ok ->
ok;
{error, {already_started, App}} ->
ok
end.
| null | https://raw.githubusercontent.com/DavidAlphaFox/RabbitMQ/0a64e6f0464a9a4ce85c6baa52fb1c584689f49a/plugins-src/mochiweb-wrapper/mochiweb-git/src/mochiweb.erl | erlang | @doc Return a mochiweb_request data structure.
this case probably doesn't "exist".
Request-URI is "*"
@doc Return a mochiweb_response data structure.
@doc Start the given App if it has not been started already. | @author < >
2007 Mochi Media , Inc.
@doc Start and stop the MochiWeb server .
-module(mochiweb).
-author('').
-export([new_request/1, new_response/1]).
-export([all_loaded/0, all_loaded/1, reload/0]).
-export([ensure_started/1]).
reload() ->
[c:l(Module) || Module <- all_loaded()].
all_loaded() ->
all_loaded(filename:dirname(code:which(?MODULE))).
all_loaded(Base) when is_atom(Base) ->
[];
all_loaded(Base) ->
FullBase = Base ++ "/",
F = fun ({_Module, Loaded}, Acc) when is_atom(Loaded) ->
Acc;
({Module, Loaded}, Acc) ->
case lists:prefix(FullBase, Loaded) of
true ->
[Module | Acc];
false ->
Acc
end
end,
lists:foldl(F, [], code:all_loaded()).
, Request , Headers } ) - > MochiWebRequest
new_request({Socket, {Method, {abs_path, Uri}, Version}, Headers}) ->
mochiweb_request:new(Socket,
Method,
Uri,
Version,
mochiweb_headers:make(Headers));
new_request({Socket, {Method, {absoluteURI, _Protocol, _Host, _Port, Uri},
Version}, Headers}) ->
mochiweb_request:new(Socket,
Method,
Uri,
Version,
mochiweb_headers:make(Headers));
From -sec5.html#sec5.1.2
new_request({Socket, {Method, '*'=Uri, Version}, Headers}) ->
mochiweb_request:new(Socket,
Method,
Uri,
Version,
mochiweb_headers:make(Headers)).
@spec new_response({Request , integer ( ) , Headers } ) - > MochiWebResponse
new_response({Request, Code, Headers}) ->
mochiweb_response:new(Request,
Code,
mochiweb_headers:make(Headers)).
ensure_started(App::atom ( ) ) - > ok
ensure_started(App) ->
case application:start(App) of
ok ->
ok;
{error, {already_started, App}} ->
ok
end.
|
25ac2866c80fbf524af0d61165007b839aa5f64bfc2ca0644aae5110bf87179d | squaresLab/genprog-code | minimization.ml |
*
* Copyright ( c ) 2012 - 2018 ,
* < >
* < >
* < >
* < >
* < >
* All rights reserved .
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions are
* met :
*
* 1 . Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
*
* 2 . Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution .
*
* 3 . The names of the contributors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS
* IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED
* TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL ,
* EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO ,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , DATA , OR
* PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING
* NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*
*
* Copyright (c) 2012-2018,
* Wes Weimer <>
* Stephanie Forrest <>
* Claire Le Goues <>
* Eric Schulte <>
* Jeremy Lacomis <>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. The names of the contributors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*)
* Minimization -- implements delta debugging to produce a 1 - minimal subset of
differences between a repaired variant and the original . Can minimize
either the edit history list or a list of cdiff changes ( provided by the
cdiff module ) .
differences between a repaired variant and the original. Can minimize
either the edit history list or a list of cdiff changes (provided by the
cdiff module). *)
open Cil
open Global
open Cdiff
open Printf
let minimization = ref false
let minimize_patch = ref false
let _ =
options := !options @
[
"--minimization", Arg.Set minimization,
" Attempt to minimize diff script using delta-debugging";
"--edit-script", Arg.Set minimize_patch,
" Minimize the edit script, not the tree-based diff. Default: false";
]
(** The structural signature of a variant allows us to compute a fine-grained
diff between individuals using delta-debugging. This implementation is based on
our implementation of cdiff, which applies DiffX to C code; this implementation
could be generalized pretty trivially if necessary.
[signature] maps filenames a map between function names and the root node of
the function's tree.
[node_map] maps node ids to tree_nodes.
*)
type structural_signature =
{ signature : (Cdiff.node_id StringMap.t) StringMap.t ;
node_map : Cdiff.tree_node IntMap.t }
(** virtual minimizableObject defines the basic interface that a representation
must support in order to be minimizable. See cilrep for an example; multiple
inheritence is a gift *)
class type minimizableObjectType = object('self_type)
method copy : unit -> 'self_type
method structural_signature : unit -> structural_signature
* construct_rep asks the object to build itself from either a list of edits
or a diff script , expressed as a list of pairs , where the first element of
the list is the filename and the second element is a diff script
or a diff script, expressed as a list of pairs, where the first element of
the list is the filename and the second element is a diff script *)
method construct_rep : string option -> ((string * string list) list * Cdiff.tree_node IntMap.t) option -> unit
method output : string -> unit
method name : unit -> string
method is_max_fitness : unit -> bool
end
class virtual minimizableObject = object(self : #minimizableObjectType)
(* already_signatured is used for caching *)
(* val already_signatured = ref None*)
(* CLG FIXME: the caching is broken, which is why I've commented it out.
Because minimization no longer calls this repeatedly, it is a low-priority
bug for me and shouldn't impact your user experience. *)
method structural_signature () =
(* match !already_signatured with
Some(s) -> debug "already signatured\n"; s
| None ->
*) let s = self#internal_structural_signature() in
already_signatured : = Some(s ) ;
method virtual internal_structural_signature : unit -> structural_signature
end
(* utilities for delta debugging*)
module DiffElement =
struct
type t = int * string
let compare (x,_) (y,_) = x - y
end
module DiffSet = Set.Make(DiffElement)
let map_union (map1) (map2) : Cdiff.tree_node IntMap.t =
IntMap.fold
(fun k -> fun v -> fun new_map -> IntMap.add k v new_map)
map1 map2
let split str =
let split_str = Str.split whitespace_regexp str in
match split_str with
| [a; b; c; d] -> a, a^" "^b^" "^c^" "^d
| _ -> assert(false)
(* Turn a list of strings into a list of pairs, (string1 * string2), where
string1 is unique. This function can fail if the input list does not match
the expected format (leading to an assert(false) in the split helper
function above). *)
let script_to_pair_list a =
List.fold_left (fun (acc : (string * (string list)) list) (ele : string) ->
let (a : string),(b : string) = split ele in
match acc with
| (a',b') :: tl when a=a' -> (a',(b'@[b])) :: tl
| x -> (a,[b]) :: x
) [] a
let cdiff_data_ht = hcreate 255
* structural_difference_edit_script sig1 sig2 returns a list of ( file ,
global_diffs list ) elements , where a global_diff is a pair of ( global_name , edit
operations ) . This list represents the difference between the two signatures
sig1 and sig2 as reported by Cdiff , our implementation of Xdiff .
global_diffs list) elements, where a global_diff is a pair of (global_name, edit
operations). This list represents the difference between the two signatures
sig1 and sig2 as reported by Cdiff, our implementation of Xdiff. *)
let structural_difference_edit_script
(sig1 : structural_signature)
(sig2 : structural_signature) =
let node_map = map_union sig1.node_map sig2.node_map in
let final_result = ref [] in
Hashtbl.clear cdiff_data_ht;
if map_cardinal sig1.signature == 1 then
StringMap.iter
(fun filename filemap ->
let file2 = StringMap.find filename sig2.signature in
let inner_result = ref [] in
StringMap.iter
(fun global_name1 t1->
let t2 = StringMap.find global_name1 file2 in
let m = Cdiff.mapping node_map t1 t2 in
Hashtbl.add cdiff_data_ht global_name1 (m,t1,t2);
let s =
Cdiff.generate_script
node_map
(Cdiff.node_of_nid node_map t1)
(Cdiff.node_of_nid node_map t2) m
in
inner_result := (global_name1,s) :: !inner_result)
filemap;
final_result := (filename, (List.rev !inner_result) ) :: !final_result)
sig1.signature;
List.rev !final_result
* structural_difference_to_string rep1 computes the edit script
describing the changes between rep1 and and reifies the result as a
string
describing the changes between rep1 and rep2 and reifies the result as a
string *)
let structural_difference_to_string rep1 rep2 =
let script = structural_difference_edit_script rep1 rep2 in
lfoldl (fun str (file,file_diffs) ->
lfoldl (fun str (global,global_diffs) ->
lfoldl (fun str elt ->
let as_string =
Printf.sprintf "%s %s %s\n" file global
FIXME
in
str^as_string
) str global_diffs
) str file_diffs
) "" script
(** {b process_representation} original_variant node_map diff_script applies
diff_script (typically subsetted from the initial diff script) to original
to produce a new variant and calls test_fitness to compute its fitness. It
returns true if the variant produced by applying diff_script to
original_variant passes all test cases and false otherwise. It is a helper
functon for delta_debugging *)
let process_representation (orig : minimizableObjectType) (node_map : Cdiff.tree_node IntMap.t) diff_script =
let the_rep = orig#copy() in
if !minimize_patch then
let script = lfoldl (fun acc str -> acc^" "^str) "" diff_script in
the_rep#construct_rep (Some(script)) (None)
else
the_rep#construct_rep (None) (Some((script_to_pair_list diff_script), node_map));
the_rep#is_max_fitness ()
let delta_set_to_list set = lmap snd (DiffSet.elements set)
let delta_count = ref 0
* { b delta_debugging } original_variant diff_script node_map returns a
one - minimal subset of diff_script that , when applied to original_variant ,
produces a variant that still passes all test cases . delta_debugging
basically implements 's delta_debugging algorithm .
one-minimal subset of diff_script that, when applied to original_variant,
produces a variant that still passes all test cases. delta_debugging
basically implements Zeller's delta_debugging algorithm. *)
let delta_debugging orig to_minimize node_map = begin
(* sanity check the diff script *)
if not (process_representation orig (copy node_map) to_minimize) then
abort "Delta debugging: original script doesn't pass all test cases (and it should)!\n"
else debug "GOOD NEWS: original script passes!\n";
(* initialize the diffset based on the input *)
let counter = ref 0 in
let c =
lfoldl
(fun c x ->
let c =
DiffSet.add ((!counter),x) c in
incr counter; c
) (DiffSet.empty) to_minimize in
let l_find func lst =
try
let res = List.find func lst in
true,Some(res)
with Not_found -> false, None
in
let rec delta_debug c n =
incr delta_count;
debug "Entering delta, pass number %d...\n" !delta_count;
let count = ref 0 in
let ci_array = Array.init n (fun _ -> DiffSet.empty) in
if n<=(DiffSet.cardinal c) then begin
DiffSet.iter (fun (num,x) ->
ci_array.(!count mod n) <- DiffSet.add (num,x) ci_array.(!count mod n);
incr count
) c;
let ci_list = Array.to_list ci_array in
let found,res =
l_find
(fun c_i ->
let node_map' = copy node_map in
let delta_set_lst = delta_set_to_list c_i in
process_representation orig node_map' delta_set_lst)
ci_list
in
if found then delta_debug (get_opt res) 2
else
let found, res =
l_find
(fun c_i ->
let delta_set_lst = delta_set_to_list (DiffSet.diff c c_i) in
let node_map' = copy node_map in
process_representation orig node_map' delta_set_lst)
ci_list
in
if found then
let ci = get_opt res in
delta_debug (DiffSet.diff c ci) (max (n-1) 2)
else if n < ((DiffSet.cardinal c)) then
delta_debug c (min (2*n) (DiffSet.cardinal c))
else c
end else c
in
(* do minimization...*)
let minimized_script = delta_debug c 2 in
(* output minimized script and file *)
let minimized = delta_set_to_list minimized_script in
let min_rep = orig#copy() in
min_rep#construct_rep (None) (Some((script_to_pair_list minimized), node_map));
min_rep#output "Minimization_Files/minimized.c";
let output_name = "minimized.diffscript" in
ensure_directories_exist ("Minimization_Files/full."^output_name);
let fout = open_out ("Minimization_Files/full."^output_name) in
liter (fun x -> Printf.fprintf fout "%s\n" x) minimized;
minimized
end
* { b do_minimization } original_variant initial_repair_variant computes the
edit list between the two input variants and calls delta_debugging to
produce a 1 - minimal subset of those edits ( if minimization is enabled )
edit list between the two input variants and calls delta_debugging to
produce a 1-minimal subset of those edits (if minimization is enabled) *)
let do_minimization orig rep rep_name =
if !minimization then begin
let to_minimize,node_map =
if !minimize_patch then
Str.split space_regexp rep_name, IntMap.empty
else begin
let orig_sig = orig#structural_signature() in
let rep_sig = rep#structural_signature() in
let node_map : Cdiff.tree_node IntMap.t =
map_union orig_sig.node_map rep_sig.node_map
in
let node_id_to_node = hcreate 10 in
IntMap.iter
(fun node_id -> fun node -> hadd node_id_to_node node_id node)
node_map;
let diff_script = structural_difference_to_string orig_sig rep_sig in
Str.split (Str.regexp "\n") diff_script, node_map
end
in
let output_name = "minimized.diffscript" in
ensure_directories_exist ("Minimization_Files/full."^output_name);
CLG question to self : does output as used below do the reasonable thing
for multi - file variants ? I suspect it does , but should probably check .
for multi-file variants? I suspect it does, but should probably check. *)
orig#output "Minimization_Files/original.c";
rep#output "Minimization_Files/unminimized.c";
ignore(delta_debugging orig to_minimize node_map)
end
| null | https://raw.githubusercontent.com/squaresLab/genprog-code/7982415b347529efee02190ab9ba6bd7eda6195e/src/minimization.ml | ocaml | * The structural signature of a variant allows us to compute a fine-grained
diff between individuals using delta-debugging. This implementation is based on
our implementation of cdiff, which applies DiffX to C code; this implementation
could be generalized pretty trivially if necessary.
[signature] maps filenames a map between function names and the root node of
the function's tree.
[node_map] maps node ids to tree_nodes.
* virtual minimizableObject defines the basic interface that a representation
must support in order to be minimizable. See cilrep for an example; multiple
inheritence is a gift
already_signatured is used for caching
val already_signatured = ref None
CLG FIXME: the caching is broken, which is why I've commented it out.
Because minimization no longer calls this repeatedly, it is a low-priority
bug for me and shouldn't impact your user experience.
match !already_signatured with
Some(s) -> debug "already signatured\n"; s
| None ->
utilities for delta debugging
Turn a list of strings into a list of pairs, (string1 * string2), where
string1 is unique. This function can fail if the input list does not match
the expected format (leading to an assert(false) in the split helper
function above).
* {b process_representation} original_variant node_map diff_script applies
diff_script (typically subsetted from the initial diff script) to original
to produce a new variant and calls test_fitness to compute its fitness. It
returns true if the variant produced by applying diff_script to
original_variant passes all test cases and false otherwise. It is a helper
functon for delta_debugging
sanity check the diff script
initialize the diffset based on the input
do minimization...
output minimized script and file |
*
* Copyright ( c ) 2012 - 2018 ,
* < >
* < >
* < >
* < >
* < >
* All rights reserved .
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions are
* met :
*
* 1 . Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
*
* 2 . Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution .
*
* 3 . The names of the contributors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS
* IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED
* TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL ,
* EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO ,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , DATA , OR
* PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING
* NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*
*
* Copyright (c) 2012-2018,
* Wes Weimer <>
* Stephanie Forrest <>
* Claire Le Goues <>
* Eric Schulte <>
* Jeremy Lacomis <>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. The names of the contributors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*)
* Minimization -- implements delta debugging to produce a 1 - minimal subset of
differences between a repaired variant and the original . Can minimize
either the edit history list or a list of cdiff changes ( provided by the
cdiff module ) .
differences between a repaired variant and the original. Can minimize
either the edit history list or a list of cdiff changes (provided by the
cdiff module). *)
open Cil
open Global
open Cdiff
open Printf
let minimization = ref false
let minimize_patch = ref false
let _ =
options := !options @
[
"--minimization", Arg.Set minimization,
" Attempt to minimize diff script using delta-debugging";
"--edit-script", Arg.Set minimize_patch,
" Minimize the edit script, not the tree-based diff. Default: false";
]
type structural_signature =
{ signature : (Cdiff.node_id StringMap.t) StringMap.t ;
node_map : Cdiff.tree_node IntMap.t }
class type minimizableObjectType = object('self_type)
method copy : unit -> 'self_type
method structural_signature : unit -> structural_signature
* construct_rep asks the object to build itself from either a list of edits
or a diff script , expressed as a list of pairs , where the first element of
the list is the filename and the second element is a diff script
or a diff script, expressed as a list of pairs, where the first element of
the list is the filename and the second element is a diff script *)
method construct_rep : string option -> ((string * string list) list * Cdiff.tree_node IntMap.t) option -> unit
method output : string -> unit
method name : unit -> string
method is_max_fitness : unit -> bool
end
class virtual minimizableObject = object(self : #minimizableObjectType)
method structural_signature () =
already_signatured : = Some(s ) ;
method virtual internal_structural_signature : unit -> structural_signature
end
module DiffElement =
struct
type t = int * string
let compare (x,_) (y,_) = x - y
end
module DiffSet = Set.Make(DiffElement)
let map_union (map1) (map2) : Cdiff.tree_node IntMap.t =
IntMap.fold
(fun k -> fun v -> fun new_map -> IntMap.add k v new_map)
map1 map2
let split str =
let split_str = Str.split whitespace_regexp str in
match split_str with
| [a; b; c; d] -> a, a^" "^b^" "^c^" "^d
| _ -> assert(false)
let script_to_pair_list a =
List.fold_left (fun (acc : (string * (string list)) list) (ele : string) ->
let (a : string),(b : string) = split ele in
match acc with
| (a',b') :: tl when a=a' -> (a',(b'@[b])) :: tl
| x -> (a,[b]) :: x
) [] a
let cdiff_data_ht = hcreate 255
* structural_difference_edit_script sig1 sig2 returns a list of ( file ,
global_diffs list ) elements , where a global_diff is a pair of ( global_name , edit
operations ) . This list represents the difference between the two signatures
sig1 and sig2 as reported by Cdiff , our implementation of Xdiff .
global_diffs list) elements, where a global_diff is a pair of (global_name, edit
operations). This list represents the difference between the two signatures
sig1 and sig2 as reported by Cdiff, our implementation of Xdiff. *)
let structural_difference_edit_script
(sig1 : structural_signature)
(sig2 : structural_signature) =
let node_map = map_union sig1.node_map sig2.node_map in
let final_result = ref [] in
Hashtbl.clear cdiff_data_ht;
if map_cardinal sig1.signature == 1 then
StringMap.iter
(fun filename filemap ->
let file2 = StringMap.find filename sig2.signature in
let inner_result = ref [] in
StringMap.iter
(fun global_name1 t1->
let t2 = StringMap.find global_name1 file2 in
let m = Cdiff.mapping node_map t1 t2 in
Hashtbl.add cdiff_data_ht global_name1 (m,t1,t2);
let s =
Cdiff.generate_script
node_map
(Cdiff.node_of_nid node_map t1)
(Cdiff.node_of_nid node_map t2) m
in
inner_result := (global_name1,s) :: !inner_result)
filemap;
final_result := (filename, (List.rev !inner_result) ) :: !final_result)
sig1.signature;
List.rev !final_result
* structural_difference_to_string rep1 computes the edit script
describing the changes between rep1 and and reifies the result as a
string
describing the changes between rep1 and rep2 and reifies the result as a
string *)
let structural_difference_to_string rep1 rep2 =
let script = structural_difference_edit_script rep1 rep2 in
lfoldl (fun str (file,file_diffs) ->
lfoldl (fun str (global,global_diffs) ->
lfoldl (fun str elt ->
let as_string =
Printf.sprintf "%s %s %s\n" file global
FIXME
in
str^as_string
) str global_diffs
) str file_diffs
) "" script
let process_representation (orig : minimizableObjectType) (node_map : Cdiff.tree_node IntMap.t) diff_script =
let the_rep = orig#copy() in
if !minimize_patch then
let script = lfoldl (fun acc str -> acc^" "^str) "" diff_script in
the_rep#construct_rep (Some(script)) (None)
else
the_rep#construct_rep (None) (Some((script_to_pair_list diff_script), node_map));
the_rep#is_max_fitness ()
let delta_set_to_list set = lmap snd (DiffSet.elements set)
let delta_count = ref 0
* { b delta_debugging } original_variant diff_script node_map returns a
one - minimal subset of diff_script that , when applied to original_variant ,
produces a variant that still passes all test cases . delta_debugging
basically implements 's delta_debugging algorithm .
one-minimal subset of diff_script that, when applied to original_variant,
produces a variant that still passes all test cases. delta_debugging
basically implements Zeller's delta_debugging algorithm. *)
let delta_debugging orig to_minimize node_map = begin
if not (process_representation orig (copy node_map) to_minimize) then
abort "Delta debugging: original script doesn't pass all test cases (and it should)!\n"
else debug "GOOD NEWS: original script passes!\n";
let counter = ref 0 in
let c =
lfoldl
(fun c x ->
let c =
DiffSet.add ((!counter),x) c in
incr counter; c
) (DiffSet.empty) to_minimize in
let l_find func lst =
try
let res = List.find func lst in
true,Some(res)
with Not_found -> false, None
in
let rec delta_debug c n =
incr delta_count;
debug "Entering delta, pass number %d...\n" !delta_count;
let count = ref 0 in
let ci_array = Array.init n (fun _ -> DiffSet.empty) in
if n<=(DiffSet.cardinal c) then begin
DiffSet.iter (fun (num,x) ->
ci_array.(!count mod n) <- DiffSet.add (num,x) ci_array.(!count mod n);
incr count
) c;
let ci_list = Array.to_list ci_array in
let found,res =
l_find
(fun c_i ->
let node_map' = copy node_map in
let delta_set_lst = delta_set_to_list c_i in
process_representation orig node_map' delta_set_lst)
ci_list
in
if found then delta_debug (get_opt res) 2
else
let found, res =
l_find
(fun c_i ->
let delta_set_lst = delta_set_to_list (DiffSet.diff c c_i) in
let node_map' = copy node_map in
process_representation orig node_map' delta_set_lst)
ci_list
in
if found then
let ci = get_opt res in
delta_debug (DiffSet.diff c ci) (max (n-1) 2)
else if n < ((DiffSet.cardinal c)) then
delta_debug c (min (2*n) (DiffSet.cardinal c))
else c
end else c
in
let minimized_script = delta_debug c 2 in
let minimized = delta_set_to_list minimized_script in
let min_rep = orig#copy() in
min_rep#construct_rep (None) (Some((script_to_pair_list minimized), node_map));
min_rep#output "Minimization_Files/minimized.c";
let output_name = "minimized.diffscript" in
ensure_directories_exist ("Minimization_Files/full."^output_name);
let fout = open_out ("Minimization_Files/full."^output_name) in
liter (fun x -> Printf.fprintf fout "%s\n" x) minimized;
minimized
end
* { b do_minimization } original_variant initial_repair_variant computes the
edit list between the two input variants and calls delta_debugging to
produce a 1 - minimal subset of those edits ( if minimization is enabled )
edit list between the two input variants and calls delta_debugging to
produce a 1-minimal subset of those edits (if minimization is enabled) *)
let do_minimization orig rep rep_name =
if !minimization then begin
let to_minimize,node_map =
if !minimize_patch then
Str.split space_regexp rep_name, IntMap.empty
else begin
let orig_sig = orig#structural_signature() in
let rep_sig = rep#structural_signature() in
let node_map : Cdiff.tree_node IntMap.t =
map_union orig_sig.node_map rep_sig.node_map
in
let node_id_to_node = hcreate 10 in
IntMap.iter
(fun node_id -> fun node -> hadd node_id_to_node node_id node)
node_map;
let diff_script = structural_difference_to_string orig_sig rep_sig in
Str.split (Str.regexp "\n") diff_script, node_map
end
in
let output_name = "minimized.diffscript" in
ensure_directories_exist ("Minimization_Files/full."^output_name);
CLG question to self : does output as used below do the reasonable thing
for multi - file variants ? I suspect it does , but should probably check .
for multi-file variants? I suspect it does, but should probably check. *)
orig#output "Minimization_Files/original.c";
rep#output "Minimization_Files/unminimized.c";
ignore(delta_debugging orig to_minimize node_map)
end
|
84c3d5758175b84e4fa3bbb481af4b722f0fa01a6598bab6efe27435b7eabd77 | amnh/PCG | Special.hs | -----------------------------------------------------------------------------
-- |
-- Module : Data.Alphabet.Special
Copyright : ( c ) 2015 - 2021 Ward Wheeler
-- License : BSD-style
--
-- Maintainer :
-- Stability : provisional
-- Portability : portable
--
-----------------------------------------------------------------------------
module Data.Alphabet.Special
( -- * Special Alphabet constructions
aminoAcidAlphabet
, dnaAlphabet
, rnaAlphabet
, discreteAlphabet
-- * Special Alphabet Queries
, isAlphabetAminoAcid
, isAlphabetDna
, isAlphabetRna
, isAlphabetDiscrete
) where
import Data.Alphabet.IUPAC
import Data.Alphabet.Internal
import Data.Bimap (Bimap)
import qualified Data.Bimap as BM
import Data.Char (isUpper)
import Data.Foldable
import Data.List.NonEmpty (NonEmpty)
import qualified Data.List.NonEmpty as NE
import qualified Data.Set as Set
import Data.String
-- |
Alphabet of amino acids .
aminoAcidAlphabet :: (IsString s, Ord s) => Alphabet s
aminoAcidAlphabet = fromBimap iupacToAminoAcid
-- |
Alphabet of DNA bases .
dnaAlphabet :: (IsString s, Ord s) => Alphabet s
dnaAlphabet = fromBimap iupacToDna
-- |
Alphabet of RNA bases .
rnaAlphabet :: (IsString s, Ord s) => Alphabet s
rnaAlphabet = fromBimap iupacToRna
-- |
Alphabet of " discrete " values .
--
The discrete alphabet includes the following 63 values :
--
@ [ \'0\' .. \'9\ ' ] < > [ \'A\' .. \'Z\ ' ] < > [ \'a\' .. \'z\ ' ] < > " - " @
discreteAlphabet :: (IsString s, Ord s) => Alphabet s
discreteAlphabet = fromSymbols $ fromString . pure <$> fold [['0'..'9'], ['A'..'Z'], ['a'..'z'], "-"]
-- |
-- /O(n)/
--
-- Determines if the supplied alphabet represents amino acid symbols.
--
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
-- code.
isAlphabetAminoAcid :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetAminoAcid = isAlphabetSubsetOf aminoAcidAlphabet
-- |
-- /O(n)/
--
-- Determines if the supplied alphabet represents DNA symbols.
--
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
-- code.
isAlphabetDna :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetDna = isAlphabetSubsetOf dnaAlphabet
-- |
-- /O(n)/
--
-- Determines if the supplied alphabet represents DNA symbols.
--
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
-- code.
isAlphabetRna :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetRna = isAlphabetSubsetOf rnaAlphabet
-- |
-- /O(n)/
--
-- Determines if the supplied alphabet represents DNA symbols.
--
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
-- code.
isAlphabetDiscrete :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetDiscrete = isAlphabetSubsetOf discreteAlphabet
isAlphabetSubsetOf :: Ord s => Alphabet s -> Alphabet s -> Bool
isAlphabetSubsetOf specialAlpahbet queryAlphabet = querySet `Set.isSubsetOf` specialSet
where
querySet = Set.fromList $ toList queryAlphabet
specialSet = Set.fromList $ toList specialAlpahbet
fromBimap :: (IsString s, Ord s) => Bimap (NonEmpty String) a -> Alphabet s
fromBimap = fromSymbols . fmap fromString . filter isUpperCaseStr . fmap NE.head . BM.keys
where
isUpperCaseStr (x:_) = isUpper x
isUpperCaseStr _ = False
| null | https://raw.githubusercontent.com/amnh/PCG/9341efe0ec2053302c22b4466157d0a24ed18154/lib/alphabet/src/Data/Alphabet/Special.hs | haskell | ---------------------------------------------------------------------------
|
Module : Data.Alphabet.Special
License : BSD-style
Maintainer :
Stability : provisional
Portability : portable
---------------------------------------------------------------------------
* Special Alphabet constructions
* Special Alphabet Queries
|
|
|
|
|
/O(n)/
Determines if the supplied alphabet represents amino acid symbols.
code.
|
/O(n)/
Determines if the supplied alphabet represents DNA symbols.
code.
|
/O(n)/
Determines if the supplied alphabet represents DNA symbols.
code.
|
/O(n)/
Determines if the supplied alphabet represents DNA symbols.
code. | Copyright : ( c ) 2015 - 2021 Ward Wheeler
module Data.Alphabet.Special
aminoAcidAlphabet
, dnaAlphabet
, rnaAlphabet
, discreteAlphabet
, isAlphabetAminoAcid
, isAlphabetDna
, isAlphabetRna
, isAlphabetDiscrete
) where
import Data.Alphabet.IUPAC
import Data.Alphabet.Internal
import Data.Bimap (Bimap)
import qualified Data.Bimap as BM
import Data.Char (isUpper)
import Data.Foldable
import Data.List.NonEmpty (NonEmpty)
import qualified Data.List.NonEmpty as NE
import qualified Data.Set as Set
import Data.String
Alphabet of amino acids .
aminoAcidAlphabet :: (IsString s, Ord s) => Alphabet s
aminoAcidAlphabet = fromBimap iupacToAminoAcid
Alphabet of DNA bases .
dnaAlphabet :: (IsString s, Ord s) => Alphabet s
dnaAlphabet = fromBimap iupacToDna
Alphabet of RNA bases .
rnaAlphabet :: (IsString s, Ord s) => Alphabet s
rnaAlphabet = fromBimap iupacToRna
Alphabet of " discrete " values .
The discrete alphabet includes the following 63 values :
@ [ \'0\' .. \'9\ ' ] < > [ \'A\' .. \'Z\ ' ] < > [ \'a\' .. \'z\ ' ] < > " - " @
discreteAlphabet :: (IsString s, Ord s) => Alphabet s
discreteAlphabet = fromSymbols $ fromString . pure <$> fold [['0'..'9'], ['A'..'Z'], ['a'..'z'], "-"]
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
isAlphabetAminoAcid :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetAminoAcid = isAlphabetSubsetOf aminoAcidAlphabet
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
isAlphabetDna :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetDna = isAlphabetSubsetOf dnaAlphabet
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
isAlphabetRna :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetRna = isAlphabetSubsetOf rnaAlphabet
Useful for determining if an ' NonEmpty ' should be rendered as an IUPAC
isAlphabetDiscrete :: (IsString s, Ord s) => Alphabet s -> Bool
isAlphabetDiscrete = isAlphabetSubsetOf discreteAlphabet
isAlphabetSubsetOf :: Ord s => Alphabet s -> Alphabet s -> Bool
isAlphabetSubsetOf specialAlpahbet queryAlphabet = querySet `Set.isSubsetOf` specialSet
where
querySet = Set.fromList $ toList queryAlphabet
specialSet = Set.fromList $ toList specialAlpahbet
fromBimap :: (IsString s, Ord s) => Bimap (NonEmpty String) a -> Alphabet s
fromBimap = fromSymbols . fmap fromString . filter isUpperCaseStr . fmap NE.head . BM.keys
where
isUpperCaseStr (x:_) = isUpper x
isUpperCaseStr _ = False
|
166e73770c871c20e48bd5024dadc227f4ba0906820ed0e2f3899c1311de5cef | c-cube/batsat-ocaml | Batsat.mli |
(* This file is free software. See file "license" for more details. *)
* { 1 Bindings to Batsat }
type t
* An instance of batsat ( stateful )
type 'a printer = Format.formatter -> 'a -> unit
module Lit : sig
type t = private int
* Some representation of literals that will be accepted by the SAT solver .
val equal : t -> t -> bool
val hash : t -> int
val compare : t -> t -> int
val make : int -> t
(** [make n] creates the literal whose index is [n].
{b NOTE} [n] must be strictly positive. Use {!neg} to obtain
the negation of a literal. *)
val make_with_sign : bool -> int -> t
* [ make_with_sign b x ] is [ if b then make x else neg ( make x ) ] .
It applies the given sign to [ make x ] .
@since 0.6
It applies the given sign to [make x].
@since 0.6 *)
val neg : t -> t
(** Negation of a literal.
Invariant: [neg (neg x) = x] *)
val abs : t -> t
(** Absolute value (removes negation if any). *)
val sign : t -> bool
(** Sign: [true] if the literal is positive, [false] for a negated literal.
Invariants:
[sign (abs x) = true]
[sign (neg x) = not (sign x)]
*)
val to_int : t -> int
val to_string : t -> string
val pp : t printer
end
type assumptions = Lit.t array
val create : unit -> t
exception Unsat
val add_clause_l : t -> Lit.t list -> unit
(** @raise Unsat if the problem is unsat *)
val add_clause_a : t -> Lit.t array -> unit
(** @raise Unsat if the problem is unsat *)
val pp_clause : Lit.t list printer
val simplify : t -> unit
(** @raise Unsat if the problem is unsat *)
val solve : ?assumptions:assumptions -> t -> unit
* Solve the problem made by adding clauses using { ! }
or { ! } .
@raise Unsat if the problem is unsat
or {!add_clause_a}.
@raise Unsat if the problem is unsat *)
val solve_is_sat : ?assumptions:assumptions -> t -> bool
* Same as { ! solve } but does not raise if unsat .
@since 0.6
@since 0.6
*)
val n_vars : t -> int
val n_clauses : t -> int
val n_conflicts : t -> int
val n_props : t -> int
* Number of SAT propagations
@since 0.4
@since 0.4 *)
val n_decisions : t -> int
* Number of SAT decisions
@since 0.4
@since 0.4 *)
val is_in_unsat_core : t -> Lit.t -> bool
* [ is_in_unsat_core s lit ] checks whether [ abs(lit ) ] is part of the
unsat core ( if it was an assumption )
precondition : last call to { ! solve } raised { ! }
unsat core (if it was an assumption)
precondition: last call to {!solve} raised {!Unsat} *)
val unsat_core : t -> Lit.t array
* Access the whole unsat core
precondition : last call to { ! solve } raised { ! }
precondition: last call to {!solve} raised {!Unsat} *)
val n_proved_lvl_0 : t -> int
* Number of literals true at ( ie proved unconditionally ) .
Can only grow .
Can only grow. *)
val get_proved_lvl_0 : t -> int -> Lit.t
(** Get the n-th proved literal *)
val proved_lvl_0 : t -> Lit.t array
* All literals currently proved at level 0
type value =
| V_undef
| V_true
| V_false
val pp_value : value printer
val string_of_value : value -> string
val value : t -> Lit.t -> value
val value_lvl_0 : t -> Lit.t -> value
* [ value_lvl_0 solver lit ] returns the value of [ lit ] if it has this
value at level 0 ( proved ) , or [ V_undef ] otherwise
value at level 0 (proved), or [V_undef] otherwise *)
| null | https://raw.githubusercontent.com/c-cube/batsat-ocaml/2a092d43f117e2e7c31b20459593e174794d2ae0/src/Batsat.mli | ocaml | This file is free software. See file "license" for more details.
* [make n] creates the literal whose index is [n].
{b NOTE} [n] must be strictly positive. Use {!neg} to obtain
the negation of a literal.
* Negation of a literal.
Invariant: [neg (neg x) = x]
* Absolute value (removes negation if any).
* Sign: [true] if the literal is positive, [false] for a negated literal.
Invariants:
[sign (abs x) = true]
[sign (neg x) = not (sign x)]
* @raise Unsat if the problem is unsat
* @raise Unsat if the problem is unsat
* @raise Unsat if the problem is unsat
* Get the n-th proved literal |
* { 1 Bindings to Batsat }
type t
* An instance of batsat ( stateful )
type 'a printer = Format.formatter -> 'a -> unit
module Lit : sig
type t = private int
* Some representation of literals that will be accepted by the SAT solver .
val equal : t -> t -> bool
val hash : t -> int
val compare : t -> t -> int
val make : int -> t
val make_with_sign : bool -> int -> t
* [ make_with_sign b x ] is [ if b then make x else neg ( make x ) ] .
It applies the given sign to [ make x ] .
@since 0.6
It applies the given sign to [make x].
@since 0.6 *)
val neg : t -> t
val abs : t -> t
val sign : t -> bool
val to_int : t -> int
val to_string : t -> string
val pp : t printer
end
type assumptions = Lit.t array
val create : unit -> t
exception Unsat
val add_clause_l : t -> Lit.t list -> unit
val add_clause_a : t -> Lit.t array -> unit
val pp_clause : Lit.t list printer
val simplify : t -> unit
val solve : ?assumptions:assumptions -> t -> unit
* Solve the problem made by adding clauses using { ! }
or { ! } .
@raise Unsat if the problem is unsat
or {!add_clause_a}.
@raise Unsat if the problem is unsat *)
val solve_is_sat : ?assumptions:assumptions -> t -> bool
* Same as { ! solve } but does not raise if unsat .
@since 0.6
@since 0.6
*)
val n_vars : t -> int
val n_clauses : t -> int
val n_conflicts : t -> int
val n_props : t -> int
* Number of SAT propagations
@since 0.4
@since 0.4 *)
val n_decisions : t -> int
* Number of SAT decisions
@since 0.4
@since 0.4 *)
val is_in_unsat_core : t -> Lit.t -> bool
* [ is_in_unsat_core s lit ] checks whether [ abs(lit ) ] is part of the
unsat core ( if it was an assumption )
precondition : last call to { ! solve } raised { ! }
unsat core (if it was an assumption)
precondition: last call to {!solve} raised {!Unsat} *)
val unsat_core : t -> Lit.t array
* Access the whole unsat core
precondition : last call to { ! solve } raised { ! }
precondition: last call to {!solve} raised {!Unsat} *)
val n_proved_lvl_0 : t -> int
* Number of literals true at ( ie proved unconditionally ) .
Can only grow .
Can only grow. *)
val get_proved_lvl_0 : t -> int -> Lit.t
val proved_lvl_0 : t -> Lit.t array
* All literals currently proved at level 0
type value =
| V_undef
| V_true
| V_false
val pp_value : value printer
val string_of_value : value -> string
val value : t -> Lit.t -> value
val value_lvl_0 : t -> Lit.t -> value
* [ value_lvl_0 solver lit ] returns the value of [ lit ] if it has this
value at level 0 ( proved ) , or [ V_undef ] otherwise
value at level 0 (proved), or [V_undef] otherwise *)
|
4ecaa9787516639544ad003364efbee70a4a95c28a48b4b27170cf0508275e02 | shirok/Gauche | stk.scm | ;;;
port.stk - stk compatibility interface
;;;
Copyright ( c ) 2000 - 2022 < >
;;;
;;; Redistribution and use in source and binary forms, with or without
;;; modification, are permitted provided that the following conditions
;;; are met:
;;;
;;; 1. Redistributions of source code must retain the above copyright
;;; notice, this list of conditions and the following disclaimer.
;;;
;;; 2. Redistributions in binary form must reproduce the above copyright
;;; notice, this list of conditions and the following disclaimer in the
;;; documentation and/or other materials provided with the distribution.
;;;
;;; 3. Neither the name of the authors nor the names of its contributors
;;; may be used to endorse or promote products derived from this
;;; software without specific prior written permission.
;;;
;;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
" AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
;;; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;;; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;;; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED
;;; TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
;;; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING
;;; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
;;; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;
#!no-fold-case
(define-module compat.stk
(use scheme.list)
(use scheme.charset)
(use srfi.13)
(use gauche.sequence)
(export *argc*
copy-tree remq remv remove string->uninterned-symbol bignum?
string-find string-index string-lower string-upper split-string
vector-copy vector-resize promise? continuation? catch
procedure-body input-file-port? output-file-port? open-file
close-port port-closed? try-load autoload?
when-port-readable when-port-writable error
input-string-port? output-string-port? read-from-string
open-input-virtual open-output-virtual
input-virtual-port? output-virtual-port?
keyword->string
environment? the-environment parent-environment global-environment
environment->list procedure-environment
eval-hook
export-symbol export-all-symbols
module-environment module-symbols
macro macro? macro-expand macro-expand-1 macro-body
address-of address?
set-signal-handler! add-signal-handler! get-signal-handlers
send-signal
getcwd chdir getpid expand-file-name canonical-path
system getenv setenv! file-is-readable? file-is-writable?
file-is-executable? glob remove-file rename-file
temporary-file-name
gc gc-stats expand-heap version machine-type
random set-random-seed! dump get-internal-info
time uncode
posix-perror posix-stat posix-stat->vector
posix-access posix-pipe posix-unlink posix-symlink
posix-chmod posix-rename posix-getlogin posix-mkdir
posix-rmdir posix-time posix-ctime posix-localtime
posix-gmtime posix-mktime posix-tm->vector vector->posix-tm
posix-strftime posix-fork posix-wait posix-uname
posix-host-name posix-domain-name
)
)
(select-module compat.stk)
;(define *argc* (length *argv*))
; copy-tree
(define (remq item list) (delete item list eq?))
(define (remv item list) (delete item list eqv?))
(define (remove item list) (delete item list equal?))
; string->uninterned-symbol
; bignum?
(define (string-find sub str)
(number? (string-contains str sub)))
(define (string-index sub str)
(string-contains str sub))
(define string-lower string-downcase)
(define string-upper string-upcase)
(define (split-string str :optional (delim #[\s]))
(string-tokenize str (char-set-complement delim)))
; vector-copy
; vector-resize
; promise?
; continuation?
; catch
; procedure-body
; input-file-port?
; output-file-port?
; open-file
; close-port
; port-closed?
; try-load
; autoload?
; when-port-readable
; when-port-writable
(define error errorf)
; input-string-port?
; output-string-port?
(define (read-from-string str) (with-input-from-string str read))
; open-input-virtual
; open-output-virtual
; input-virtual-port?
; output-virtual-port?
; keyword->string
; environment?
; the-environment
; parent-environent
; global-environment
; environment->list
; procedure-environment
; eval-hook
; export-symbol
; export-all-symbols
; module-environment
; module-symbols
; macro
; macro?
; macro-expand
; macro-expand-1
; macro-body
; address-of
; address?
; set-signal-handler!
; add-signal-handler!
; get-signal-handlers
; send-signal
(define getcwd sys-getcwd)
(define chdir sys-chdir)
(define getpid sys-getpid)
(define (expand-file-name name)
(sys-normalize-pathname name :expand #t))
(define (canonical-path name)
(sys-normalize-pathname name :canonical #t))
(define system sys-system)
(define getenv sys-getenv)
(define setenv! sys-putenv)
(define (file-is-readable? file) (sys-access file R_OK))
(define (file-is-writable? file) (sys-access file W_OK))
(define (file-is-executable? file) (sys-access file X_OK))
(define glob sys-glob)
(define remove-file sys-unlink)
(define rename-file sys-rename)
(define temporary-file-name sys-tmpnam)
(define (eval-string str) (eval (read-from-string str)))
; gc
; gc-stats
; expand-heap
(define version gauche-version)
; machine-type
(define random sys-random)
(define set-random-seed! sys-srandom)
; dump
; get-internal-info
; time
; uncode
;; POSIX
(provide "posix") ; to fool (require "posix")
; *errno*
; posix-perror
(define posix-stat sys-stat)
(define (posix-stat->vector stat)
(apply vector
(map (^p (p stat))
(list sys-stat->dev
sys-stat->ino
sys-stat->mode
sys-stat->nlink
sys-stat->uid
sys-stat->gid
sys-stat->size
sys-stat->atime
sys-stat->mtime
sys-stat->ctime))))
(define posix-access sys-access)
(define (posix-pipe)
(receive io (sys-pipe) io))
(define posix-unlink sys-unlink)
(define posix-symlink sys-symlink)
(define posix-chmod sys-chmod)
(define posix-rename sys-rename)
(define posix-getlogin sys-getlogin)
(define posix-rmdir sys-rmdir)
(define posix-time sys-time)
(define posix-ctime sys-ctime)
(define posix-localtime sys-localtime)
(define posix-gmtime sys-gmtime)
(define posix-mktime sys-mktime)
(define (posix-tm->vector tm)
(error "not implemented yet"))
(define (vector->posix-tm tm)
(error "not implemented yet"))
(define posix-strftime sys-strftime)
(define posix-fork sys-fork)
(define (posix-wait)
(receive st (sys-wait) st))
(define (posix-uname)
(list->vector (sys-uname)))
(define posix-host-name sys-gethostname)
(define posix-domain-name sys-getdomainname)
| null | https://raw.githubusercontent.com/shirok/Gauche/e97efb5e54f9ab97746369b8ac748f338224c746/lib/compat/stk.scm | scheme |
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the authors nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
(define *argc* (length *argv*))
copy-tree
string->uninterned-symbol
bignum?
vector-copy
vector-resize
promise?
continuation?
catch
procedure-body
input-file-port?
output-file-port?
open-file
close-port
port-closed?
try-load
autoload?
when-port-readable
when-port-writable
input-string-port?
output-string-port?
open-input-virtual
open-output-virtual
input-virtual-port?
output-virtual-port?
keyword->string
environment?
the-environment
parent-environent
global-environment
environment->list
procedure-environment
eval-hook
export-symbol
export-all-symbols
module-environment
module-symbols
macro
macro?
macro-expand
macro-expand-1
macro-body
address-of
address?
set-signal-handler!
add-signal-handler!
get-signal-handlers
send-signal
gc
gc-stats
expand-heap
machine-type
dump
get-internal-info
time
uncode
POSIX
to fool (require "posix")
*errno*
posix-perror | port.stk - stk compatibility interface
Copyright ( c ) 2000 - 2022 < >
" AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED
LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING
#!no-fold-case
(define-module compat.stk
(use scheme.list)
(use scheme.charset)
(use srfi.13)
(use gauche.sequence)
(export *argc*
copy-tree remq remv remove string->uninterned-symbol bignum?
string-find string-index string-lower string-upper split-string
vector-copy vector-resize promise? continuation? catch
procedure-body input-file-port? output-file-port? open-file
close-port port-closed? try-load autoload?
when-port-readable when-port-writable error
input-string-port? output-string-port? read-from-string
open-input-virtual open-output-virtual
input-virtual-port? output-virtual-port?
keyword->string
environment? the-environment parent-environment global-environment
environment->list procedure-environment
eval-hook
export-symbol export-all-symbols
module-environment module-symbols
macro macro? macro-expand macro-expand-1 macro-body
address-of address?
set-signal-handler! add-signal-handler! get-signal-handlers
send-signal
getcwd chdir getpid expand-file-name canonical-path
system getenv setenv! file-is-readable? file-is-writable?
file-is-executable? glob remove-file rename-file
temporary-file-name
gc gc-stats expand-heap version machine-type
random set-random-seed! dump get-internal-info
time uncode
posix-perror posix-stat posix-stat->vector
posix-access posix-pipe posix-unlink posix-symlink
posix-chmod posix-rename posix-getlogin posix-mkdir
posix-rmdir posix-time posix-ctime posix-localtime
posix-gmtime posix-mktime posix-tm->vector vector->posix-tm
posix-strftime posix-fork posix-wait posix-uname
posix-host-name posix-domain-name
)
)
(select-module compat.stk)
(define (remq item list) (delete item list eq?))
(define (remv item list) (delete item list eqv?))
(define (remove item list) (delete item list equal?))
(define (string-find sub str)
(number? (string-contains str sub)))
(define (string-index sub str)
(string-contains str sub))
(define string-lower string-downcase)
(define string-upper string-upcase)
(define (split-string str :optional (delim #[\s]))
(string-tokenize str (char-set-complement delim)))
(define error errorf)
(define (read-from-string str) (with-input-from-string str read))
(define getcwd sys-getcwd)
(define chdir sys-chdir)
(define getpid sys-getpid)
(define (expand-file-name name)
(sys-normalize-pathname name :expand #t))
(define (canonical-path name)
(sys-normalize-pathname name :canonical #t))
(define system sys-system)
(define getenv sys-getenv)
(define setenv! sys-putenv)
(define (file-is-readable? file) (sys-access file R_OK))
(define (file-is-writable? file) (sys-access file W_OK))
(define (file-is-executable? file) (sys-access file X_OK))
(define glob sys-glob)
(define remove-file sys-unlink)
(define rename-file sys-rename)
(define temporary-file-name sys-tmpnam)
(define (eval-string str) (eval (read-from-string str)))
(define version gauche-version)
(define random sys-random)
(define set-random-seed! sys-srandom)
(define posix-stat sys-stat)
(define (posix-stat->vector stat)
(apply vector
(map (^p (p stat))
(list sys-stat->dev
sys-stat->ino
sys-stat->mode
sys-stat->nlink
sys-stat->uid
sys-stat->gid
sys-stat->size
sys-stat->atime
sys-stat->mtime
sys-stat->ctime))))
(define posix-access sys-access)
(define (posix-pipe)
(receive io (sys-pipe) io))
(define posix-unlink sys-unlink)
(define posix-symlink sys-symlink)
(define posix-chmod sys-chmod)
(define posix-rename sys-rename)
(define posix-getlogin sys-getlogin)
(define posix-rmdir sys-rmdir)
(define posix-time sys-time)
(define posix-ctime sys-ctime)
(define posix-localtime sys-localtime)
(define posix-gmtime sys-gmtime)
(define posix-mktime sys-mktime)
(define (posix-tm->vector tm)
(error "not implemented yet"))
(define (vector->posix-tm tm)
(error "not implemented yet"))
(define posix-strftime sys-strftime)
(define posix-fork sys-fork)
(define (posix-wait)
(receive st (sys-wait) st))
(define (posix-uname)
(list->vector (sys-uname)))
(define posix-host-name sys-gethostname)
(define posix-domain-name sys-getdomainname)
|
7d295cae0cc16da8fa437ebfe3e074f50c6699a324d67261c9c002437aaed48e | singpolyma/cheogram | Adhoc.hs | module Adhoc (adhocBotSession, commandList, queryCommandList) where
import Prelude ()
import BasicPrelude hiding (log)
import Control.Concurrent (myThreadId, killThread)
import Control.Concurrent.STM
import Control.Error (hush, ExceptT, runExceptT, throwE, justZ)
import Data.XML.Types as XML (Element(..), Node(NodeContent, NodeElement), Content(ContentText), isNamed, elementText, elementChildren, attributeText)
import qualified Data.XML.Types as XML
import Network.Protocol.XMPP (JID(..), parseJID, formatJID, IQ(..), IQType(..), emptyIQ, Message(..))
import qualified Network.Protocol.XMPP as XMPP
import qualified Data.CaseInsensitive as CI
import qualified Control.Concurrent.STM.Delay as Delay
import qualified Data.Attoparsec.Text as Atto
import qualified Data.Bool.HT as HT
import qualified Data.Set as Set
import qualified Data.Text as T
import qualified Data.UUID as UUID ( toString, toText )
import qualified Data.UUID.V1 as UUID ( nextUUID )
import qualified UnexceptionalIO.Trans ()
import qualified UnexceptionalIO as UIO
import CommandAction
import StanzaRec
import UniquePrefix
import Util
import qualified ConfigureDirectMessageRoute
import qualified JidSwitch
import qualified DB
-- | Maximum lifetime of an adhoc command session: one hour, expressed in
--   microseconds (the unit expected by Control.Concurrent.STM.Delay).
sessionLifespan :: Int
sessionLifespan = hours 1
    where
    hours h = h * 60 * 60 * microsecondsPerSecond
    microsecondsPerSecond = 1000000
-- | Tag an outgoing message with a fresh origin-id (XEP-0359, urn:xmpp:sid:0)
--   so it can be tracked/deduplicated. If UUID generation yields Nothing the
--   message is returned untagged.
addOriginUUID :: (UIO.Unexceptional m) => XMPP.Message -> m XMPP.Message
addOriginUUID msg = maybe msg (addTag msg) <$> fromIO_ UUID.nextUUID
    where
    addTag msg uuid = msg { messagePayloads = Element (s"{urn:xmpp:sid:0}origin-id") [(s"id", [ContentText $ UUID.toText uuid])] [] : messagePayloads msg }
-- | Render a command-list IQ result as a plain-text help message addressed
--   back to the user, one "node: name" line per advertised command, with an
--   optional header paragraph. Nothing when the IQ lacks to/from/payload.
--   NOTE(review): items are matched as "{#items}item" — the disco#items
--   namespace URL appears scrubbed from these literals (presumably
--   "{http://jabber.org/protocol/disco#items}item" upstream); confirm.
botHelp :: Maybe Text -> IQ -> Maybe Message
botHelp header (IQ { iqTo = Just to, iqFrom = Just from, iqPayload = Just payload }) =
    Just $ mkSMS from to $ maybe mempty (++ s"\n") header ++ (s"Help:\n\t") ++ intercalate (s"\n\t") (map (\item ->
        fromMaybe mempty (attributeText (s"node") item) ++ s": " ++
        fromMaybe mempty (attributeText (s"name") item)
    ) items)
    where
    items = isNamed (s"{#items}item") =<< elementChildren payload
botHelp _ _ = Nothing
-- This replaces certain commands that the SGX supports with our sugared versions
-- | Rewrite the backend's "switch backend" command item into our own
--   JID-switch command item, pointed at this component; all other items pass
--   through unchanged.
maskCommands :: XMPP.JID -> [Element] -> [Element]
maskCommands componentJid = map (\el ->
        if attributeText (s"node") el == Just ConfigureDirectMessageRoute.switchBackendNodeName then
            Element (s"{#items}item") [
                (s"jid", [ContentText $ formatJID componentJid ++ s"/CHEOGRAM%" ++ JidSwitch.nodeName]),
                (s"node", [ContentText JidSwitch.nodeName]),
                (s"name", [ContentText $ s"Change your Jabber ID"])
            ] []
        else
            el
    )
-- | Build the disco#items IQ result advertising every command we offer: the
--   backend's commands (retargeted at this component, masked via maskCommands,
--   minus jabber:iq:register) plus our own "register with backend" command.
--   NOTE(review): the query node value and element namespaces look scrubbed
--   (empty string / "{#items}"); confirm against upstream literals.
commandList :: JID -> Maybe Text -> JID -> JID -> [Element] -> IQ
commandList componentJid qid from to extras =
    (emptyIQ IQResult) {
        iqTo = Just to,
        iqFrom = Just from,
        iqID = qid,
        iqPayload = Just $ Element (s"{#items}query")
            [(s"{#items}node", [ContentText $ s""])]
            (extraItems ++ [
                NodeElement $ Element (s"{#items}item") [
                    (s"jid", [ContentText $ formatJID componentJid ++ s"/CHEOGRAM%" ++ ConfigureDirectMessageRoute.nodeName]),
                    (s"node", [ContentText $ ConfigureDirectMessageRoute.nodeName]),
                    (s"name", [ContentText $ s"Register with backend"])
                ] []
            ])
    }
    where
    -- Point every advertised item's jid at this component so replies route
    -- through us rather than directly at the backend.
    extraItems = map NodeElement $ maskCommands componentJid $ map (\el ->
            el {
                elementAttributes = map (\(aname, acontent) ->
                    if aname == s"{#items}jid" || aname == s"jid" then
                        (aname, [ContentText $ formatJID componentJid])
                    else
                        (aname, acontent)
                ) (elementAttributes el)
            }
        ) $ filter (\el ->
            attributeText (s"node") el /= Just (s"jabber:iq:register")
        ) extras
-- | Run an answer handler for one form field. For an optional field (or one
--   that already has a value) the user may say "next" to skip it; that
--   short-circuits via ExceptT, returning the field unchanged so its existing
--   value is resubmitted. A required field with no value cannot be skipped.
withNext :: (UIO.Unexceptional m) =>
    m XMPP.Message
    -> Element
    -> (ExceptT [Element] m XMPP.Message -> ExceptT [Element] m [Element])
    -> m [Element]
withNext getMessage field answerField
    | isRequired field && T.null (mconcat $ fieldValue field) = do
        either return return =<< runExceptT (answerField $ lift getMessage)
    | otherwise =
        either return return =<< runExceptT (answerField suspension)
    where
    -- Message source that aborts with the original field when the user says "next".
    suspension = do
        m <- lift getMessage
        if fmap CI.mk (getBody (s"jabber:component:accept") m) == Just (s"next") then
            throwE [field]
        else
            return m
-- | Wrap a message source with session expiry and a "cancel" escape hatch.
--   If no message arrives within sessionLength microseconds, or the user says
--   "cancel" (case-insensitive), notify the user, run the cancel action, and
--   kill the current session thread.
withCancel :: (UIO.Unexceptional m) => Int -> (Text -> m ()) -> m () -> STM XMPP.Message -> m XMPP.Message
withCancel sessionLength sendText cancelSession getMessage = do
    delay <- fromIO_ $ Delay.newDelay sessionLength
    maybeMsg <- atomicUIO $
        (Delay.waitDelay delay *> pure Nothing)
        <|>
        Just <$> getMessage
    case maybeMsg of
        Just msg
            | (CI.mk <$> getBody (s"jabber:component:accept") msg) == Just (s"cancel") -> cancel $ s"cancelled"
        Just msg -> return msg
        Nothing -> cancel $ s"expired"
    where
    cancel t = do
        sendText t
        cancelSession
        -- Tear down this session's thread; the return below is never reached.
        fromIO_ $ myThreadId >>= killThread
        return $ error "Unreachable"
-- | Build the stanza asking a remote entity for its command list, stamped with
--   a fresh UUID as the IQ id (id left unset if UUID generation fails).
queryCommandList :: JID -> JID -> IO [StanzaRec]
queryCommandList to from = do
    uuid <- (fmap.fmap) (fromString . UUID.toString) UUID.nextUUID
    return [mkStanzaRec $ (queryCommandList' to from) {iqID = uuid}]
-- | Keep fetching messages until one's body parses successfully, running the
--   failure action before each retry. A message without a body parses as the
--   empty string.
untilParse :: (UIO.Unexceptional m) => m Message -> m () -> (Text -> Maybe b) -> m b
untilParse getText onFail parser = attempt
    where
    attempt = do
        message <- getText
        let body = fromMaybe mempty $ getBody "jabber:component:accept" message
        case parser body of
            Just result -> return result
            Nothing -> onFail >> attempt
-- | Human-readable heading for a form field: its label (or, failing that, its
--   title-cased var), an optional "[Current value …]" rendered through the
--   given formatter, and the field's description in parentheses when present.
formatLabel :: (Text -> Maybe Text) -> Element -> Text
formatLabel valueFormatter field = lbl ++ value ++ descSuffix
    where
    lbl = maybe (fromMaybe mempty $ attributeText (s"var") field) T.toTitle $ label field
    -- Empty current values are suppressed before formatting.
    value = maybe mempty (\v -> s" [Current value " ++ v ++ s"]") $ valueFormatter <=< mfilter (not . T.null) $ Just $ intercalate (s", ") (fieldValue field)
    descSuffix = maybe mempty (\dsc -> s"\n(" ++ dsc ++ s")") $ desc field
-- | "fixed" fields are display-only: show the label and values to the user and
--   collect no answer.
adhocBotAnswerFixed :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerFixed sendText _getMessage field = do
    let values = fmap (mconcat . elementText) $ isNamed (s"{jabber:x:data}value") =<< elementChildren field
    sendText $ formatLabel (const Nothing) field
    sendText $ unlines values
    return []
-- | Ask a yes/no question for a boolean field and submit "true"/"false".
--   Accepts a generous set of case-insensitive spellings; re-prompts until one
--   parses. Fields without a var are logged and skipped.
adhocBotAnswerBoolean :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerBoolean sendText getMessage field = do
    case attributeText (s"var") field of
        Just var -> do
            sendText $ formatLabel (fmap formatBool . hush . Atto.parseOnly parser) field ++ s"\nYes or No?"
            value <- untilParse getMessage (sendText helperText) $ hush . Atto.parseOnly parser
            return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
                    NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ HT.if' value (s"true") (s"false")]
                ]]
        _ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
    where
    helperText = s"I didn't understand your answer. Please send yes or no"
    parser = Atto.skipMany Atto.space *> (
            (True <$ Atto.choice (Atto.asciiCI <$> [s"true", s"t", s"1", s"yes", s"y", s"enable", s"enabled"])) <|>
            (False <$ Atto.choice (Atto.asciiCI <$> [s"false", s"f", s"0", s"no", s"n", s"disable", s"disabled"]))
        ) <* Atto.skipMany Atto.space <* Atto.endOfInput
    formatBool True = s"Yes"
    formatBool False = s"No"
-- | Prompt for a free-text value and submit the next message body verbatim.
--   A bodiless message (or a field without a var) yields no answer.
adhocBotAnswerTextSingle :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerTextSingle sendText getMessage field = do
    case attributeText (s"var") field of
        Just var -> do
            sendText $ s"Enter " ++ formatLabel Just field
            value <- getMessage
            case getBody "jabber:component:accept" value of
                Just body -> return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
                        NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText body]
                    ]]
                Nothing -> return []
        _ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
-- | One numbered menu line for a list field's option, appending
--   currentValueText when the option's value is among the selected values.
listOptionText :: (Foldable t) => t Text -> Text -> (Int, Element) -> Text
listOptionText currentValues currentValueText (n, v) = tshow n ++ s". " ++ optionText v ++ selectedText v
    where
    selectedText option
        | mconcat (fieldValue option) `elem` currentValues = currentValueText
        | otherwise = mempty
-- | Prompt for a JID value, re-prompting until the reply parses as a valid
--   JID, and submit it in canonical form. Fields without a var are logged and
--   skipped.
adhocBotAnswerJidSingle :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerJidSingle sendText getMessage field = do
    case attributeText (s"var") field of
        Just var -> do
            sendText $ s"Enter " ++ formatLabel Just field
            value <- untilParse getMessage (sendText helperText) XMPP.parseJID
            return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
                    NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ formatJID value]
                ]]
        _ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
    where
    -- Fixed: the example address had been scrubbed from this message, leaving
    -- the ungrammatical "a valid JID like or perhaps just example.com".
    helperText = s"I didn't understand your answer. Please send only a valid JID like user@example.com or perhaps just example.com"
-- | Present a numbered menu for a list-multi field and accept a comma- or
--   space-separated list of numbers (blank selects nothing). Out-of-range
--   numbers are silently ignored. Fields without a var are logged and skipped.
adhocBotAnswerListMulti :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerListMulti sendText getMessage field = do
    case attributeText (s"var") field of
        Just var -> do
            let options = zip [1..] $ isNamed(s"{jabber:x:data}option") =<< elementChildren field
            let currentValues = fieldValue field
            let optionsText = fmap (listOptionText currentValues (s" [Currently Selected]")) options
            sendText $ unlines $ [formatLabel (const Nothing) field] ++ optionsText ++ [s"Which numbers?"]
            values <- untilParse getMessage (sendText helperText) (hush . Atto.parseOnly parser)
            let selectedOptions = fmap snd $ filter (\(x, _) -> x `elem` values) options
            return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] $ flip fmap selectedOptions $ \option ->
                    NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ mconcat $ fieldValue option]
                ]
        _ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
    where
    parser = Atto.skipMany Atto.space *> Atto.sepBy Atto.decimal (Atto.skipMany $ Atto.choice [Atto.space, Atto.char ',']) <* Atto.skipMany Atto.space <* Atto.endOfInput
    helperText = s"I didn't understand your answer. Please send the numbers you want, separated by commas or spaces like \"1, 3\" or \"1 3\". Blank (or just spaces) to pick nothing."
-- Technically an option can have multiple values, but in practice it probably won't
-- | Presentation data for a list-single field: the rendered menu text, whether
--   free-form ("open") answers are allowed, and a lookup from menu number to
--   that option's values.
data ListSingle = ListSingle { listLabel :: T.Text, listIsOpen :: Bool, listLookup :: (Int -> Maybe [T.Text]) }
-- | Build the ListSingle view of a list-single field element.
listSingleHelper :: Element -> ListSingle
listSingleHelper field = ListSingle label open lookup
    where
    open = isOpenValidation (fieldValidation field)
    options = zip [1..] $ isNamed(s"{jabber:x:data}option") =<< elementChildren field
    currentValue = listToMaybe $ elementText =<< isNamed(s"{jabber:x:data}value") =<< elementChildren field
    optionsText = fmap (listOptionText currentValue (s" [Current Value]")) options
    -- An open field may hold a custom value that is not among the options;
    -- show it on its own line in that case.
    currentValueText = (\func -> maybe [] func currentValue) $ \value ->
        if open && value `notElem` (map (mconcat . fieldValue . snd) options) then
            [s"[Current Value: " ++ value ++ s"]"]
        else
            []
    label = unlines $ [formatLabel (const Nothing) field] ++ optionsText ++ currentValueText
    lookup n = fmap (fieldValue . snd) $ find (\(x, _) -> x == n) options
-- | Present a numbered menu for a list-single field. For a closed list the
--   user must pick a valid number (re-prompting otherwise); for an open list
--   any non-numeric reply is taken as a custom value, and an out-of-range
--   number is submitted as its own text. Fields without a var are logged and
--   skipped.
adhocBotAnswerListSingle :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerListSingle sendText getMessage field = do
    case attributeText (s"var") field of
        Just var -> do
            let list = listSingleHelper field
            let open = listIsOpen list
            let prompt = s"Please enter a number from the list above" ++ if open then s", or enter a custom option" else s""
            sendText $ listLabel list ++ prompt
            maybeOption <- if open then do
                    value <- untilParse getMessage (sendText helperText) (hush . Atto.parseOnly openParser)
                    return $ Just $ case value of
                        Left openValue -> [openValue]
                        Right itemNumber -> maybe ([tshow itemNumber]) id $ listLookup list itemNumber
                else do
                    value <- untilParse getMessage (sendText helperText) (hush . Atto.parseOnly parser)
                    return $ listLookup list value
            case maybeOption of
                Just option -> return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
                        NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ mconcat option]
                    ]]
                Nothing -> do
                    sendText $ s"Please pick one of the given options"
                    adhocBotAnswerListSingle sendText getMessage field
        _ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
    where
    helperText = s"I didn't understand your answer. Please just send the number of the one item you want to pick, like \"1\""
    parser = Atto.skipMany Atto.space *> Atto.decimal <* Atto.skipMany Atto.space <* Atto.endOfInput
    openParser = (Right <$> parser) <|> (Left <$> Atto.takeText)
-- | Walk a data form's fields and instructions in order, dispatch each field
--   to the handler for its type, and assemble the answers into a submit form.
--   Hidden fields are echoed back unchanged; instructions are just displayed.
adhocBotAnswerForm :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m Element
adhocBotAnswerForm sendText getMessage form = do
    fields <- forM (filter (uncurry (||) . (isField &&& isInstructions)) $ elementChildren form) $ \field ->
        let sendText' = lift . sendText in
        withNext getMessage field $ \getMessage' ->
            HT.select (
                -- The spec says a field type we don't understand should be treated as text-single
                log "ADHOC BOT UNKNOWN FIELD" field >>
                adhocBotAnswerTextSingle sendText' getMessage' field
            ) [
                (isInstructions field,
                    sendText' (mconcat $ elementText field) >> return []),
                (attributeText (s"type") field == Just (s"list-single"),
                    adhocBotAnswerListSingle sendText' getMessage' field),
                (attributeText (s"type") field == Just (s"list-multi"),
                    adhocBotAnswerListMulti sendText' getMessage' field),
                (attributeText (s"type") field == Just (s"jid-single"),
                    adhocBotAnswerJidSingle sendText' getMessage' field),
                (attributeText (s"type") field == Just (s"hidden"),
                    return [field]),
                (attributeText (s"type") field == Just (s"fixed"),
                    adhocBotAnswerFixed sendText' getMessage' field),
                (attributeText (s"type") field == Just (s"boolean"),
                    adhocBotAnswerBoolean sendText' getMessage' field),
                (attributeText (s"type") field `elem` [Just (s"text-single"), Nothing],
                    -- The default if a type isn't specified is text-single
                    adhocBotAnswerTextSingle sendText' getMessage' field)
            ]
    return $ Element (s"{jabber:x:data}x") [(s"type", [ContentText $ s"submit"])] $ NodeElement <$> mconcat fields
-- | Render a <reported/> element: the tab-separated header row plus the list
--   of vars fixing the column order for subsequent <item/> rows.
formatReported :: Element -> (Text, [Text])
formatReported =
    first (intercalate (s"\t")) . unzip .
    map (\field ->
        (
            formatLabel (const Nothing) field,
            fromMaybe mempty (attributeText (s"var") field)
        )
    ) . filter isField . elementChildren
-- | Render one result-table <item/> as a tab-separated row in the column
--   order given by reportedVars; a var with no matching field renders empty.
formatItem :: [Text] -> Element -> Text
formatItem reportedVars item = intercalate (s"\t") $ map (\var ->
        intercalate (s", ") $ findFieldValue var
    ) reportedVars
    where
    findFieldValue var = maybe [] fieldValue $ find (\field ->
            attributeText (s"var") field == Just var
        ) fields
    fields = filter isField $ elementChildren item
-- | Render one labelled result value: a single value goes inline after the
--   label; multiple values each get their own line under it.
simpleResultField :: Text -> [Text] -> Text
simpleResultField lbl values =
    case values of
        [v] -> lbl ++ s": " ++ v ++ s"\n"
        _ -> lbl ++ s":\n" ++ unlines values
-- | Render one field of a result form: hidden fields are suppressed,
--   list-singles show their full menu, jid-singles are prefixed with "xmpp:",
--   and everything else (including unknown types) renders as label: value.
formatResultField :: Element -> Text
formatResultField el = case maybe ("text-single") T.unpack $ attributeText (s"type") el of
    "hidden" -> mempty
    "list-single" -> listLabel $ listSingleHelper el
    "jid-single" -> simpleResultField label $ map (s"xmpp:" ++) $ fieldValue el
    _ -> simpleResultField label $ fieldValue el
    where
    label = formatLabel (const Nothing) el
-- | Render a completed data form as plain text. The accumulator threads the
--   vars from any <reported/> element so later <item/> rows are formatted in
--   the right column order.
renderResultForm :: Element -> Text
renderResultForm form =
    intercalate (s"\n") $ catMaybes $ snd $
    forAccumL [] (elementChildren form) $ \reportedVars el ->
        HT.select (reportedVars, Nothing) $ map (second $ second Just) [
            (isInstructions el, (reportedVars,
                mconcat $ elementText el)),
            (isField el, (reportedVars, formatResultField el)),
            (isReported el,
                swap $ formatReported el),
            (isItem el, (reportedVars,
                formatItem reportedVars el))
        ]
    where
    -- mapAccumL with arguments shuffled for pipeline-friendly use.
    forAccumL z xs f = mapAccumL f z xs
-- | Wait for the user to name a flow action (next/back/cancel/finish),
--   re-prompting with the allowed commands until one is recognized.
--   NOTE(review): recognition accepts all four words regardless of the
--   `actions` list; only the prompt is restricted — confirm this is intended.
waitForAction :: (UIO.Unexceptional m) => [Action] -> (Text -> m ()) -> m XMPP.Message -> m Action
waitForAction actions sendText getMessage = do
    m <- getMessage
    let ciBody = CI.mk <$> getBody (s"jabber:component:accept") m
    HT.select whatWasThat [
            (ciBody == Just (s"next"), return ActionNext),
            (ciBody == Just (s"back"), return ActionPrev),
            (ciBody == Just (s"cancel"), return ActionCancel),
            (ciBody == Just (s"finish"), return ActionComplete)
        ]
    where
    allowedCmds = map actionCmd (ActionCancel : actions)
    whatWasThat = do
        sendText $
            s"I didn't understand that. You can say one of: " ++
            intercalate (s", ") allowedCmds
        waitForAction actions sendText getMessage
-- | A field's human-facing label attribute, if any.
label :: Element -> Maybe Text
label = attributeText (s"label")
-- | Text shown for an option: its label, falling back to its value.
optionText :: Element -> Text
optionText element = fromMaybe (mconcat $ fieldValue element) (label element)
-- | All <value/> texts of a field.
fieldValue :: Element -> [Text]
fieldValue = fmap (mconcat . elementText) .
    isNamed (s"{jabber:x:data}value") <=< elementChildren
-- | The field's xdata-validate <validate/> child, if present.
--   NOTE(review): "{-validate}" looks like a scrubbed namespace (presumably
--   "{http://jabber.org/protocol/xdata-validate}"); confirm.
fieldValidation :: Element -> Maybe Element
fieldValidation =
    listToMaybe .
    (isNamed (s"{-validate}validate") <=< elementChildren)
-- | Whether a <validate/> element declares <open/>, i.e. free-form values allowed.
isOpenValidation :: Maybe Element -> Bool
isOpenValidation (Just el) =
    not $ null $
    isNamed (s"{-validate}open")
    =<< elementChildren el
isOpenValidation _ = False
-- | Non-empty <desc/> text of a field, if any.
desc :: Element -> Maybe Text
desc = mfilter (not . T.null) . Just . mconcat .
    (elementText <=< isNamed(s"{jabber:x:data}desc") <=< elementChildren)
-- Element-kind predicates for jabber:x:data children.
isField :: Element -> Bool
isField el = elementName el == s"{jabber:x:data}field"
isInstructions :: Element -> Bool
isInstructions el = elementName el == s"{jabber:x:data}instructions"
isReported :: Element -> Bool
isReported el = elementName el == s"{jabber:x:data}reported"
isItem :: Element -> Bool
isItem el = elementName el == s"{jabber:x:data}item"
-- | Whether a field carries <required/>.
isRequired :: Element -> Bool
isRequired = not . null . (isNamed (s"{jabber:x:data}required") <=< elementChildren)
-- | Parse the shorthand "register <jid>" (case-insensitive keyword) into the
--   gateway JID, if both the syntax and the JID are valid.
registerShorthand :: Text -> Maybe JID
registerShorthand body = do
    gatewayJID <- hush $ Atto.parseOnly (Atto.asciiCI (s"register") *> Atto.many1 Atto.space *> Atto.takeText) body
    parseJID gatewayJID
-- | Find the XEP-0157 server-info data form among disco#info extension
--   elements: a result-type jabber:x:data form whose FORM_TYPE is the
--   serverinfo namespace.
getServerInfoForm :: [XML.Element] -> Maybe XML.Element
getServerInfoForm = find (\el ->
        attributeText (s"type") el == Just (s"result") &&
        -- Fixed: the FORM_TYPE value had been scrubbed to the empty string,
        -- which can never match a real server-info form from a remote server.
        getFormField el (s"FORM_TYPE") == Just (s"http://jabber.org/network/serverinfo")
    ) . (isNamed (s"{jabber:x:data}x") =<<)
-- | Send the user the help/command list. If they have a direct-message route,
--   ask that backend for its command list and disco#info (for the server-info
--   header) concurrently and merge the results; otherwise advertise only our
--   built-in commands.
sendHelp :: (UIO.Unexceptional m) =>
    DB.DB
    -> JID
    -> (XMPP.Message -> m ())
    -> (XMPP.IQ -> UIO.UIO (STM (Maybe XMPP.IQ)))
    -> JID
    -> JID
    -> m ()
sendHelp db componentJid sendMessage sendIQ from routeFrom = do
    maybeRoute <- (parseJID =<<) . (join . hush) <$> UIO.fromIO (DB.get db (DB.byJid from ["direct-message-route"]))
    case maybeRoute of
        Just route -> do
            -- Fire both queries before blocking so they run concurrently.
            replySTM <- UIO.lift $ sendIQ $ queryCommandList' route routeFrom
            discoInfoSTM <- UIO.lift $ sendIQ $ queryDiscoWithNode' Nothing route routeFrom
            (mreply, mDiscoInfo) <- atomicUIO $ (,) <$> replySTM <*> discoInfoSTM
            let helpMessage = botHelp
                (renderResultForm <$> (getServerInfoForm . elementChildren =<< iqPayload =<< mDiscoInfo)) $
                commandList componentJid Nothing componentJid from $
                isNamed (s"{#items}item") =<< elementChildren =<< maybeToList (XMPP.iqPayload =<< mfilter ((== XMPP.IQResult) . XMPP.iqType) mreply)
            case helpMessage of
                Just msg -> sendMessage msg
                Nothing -> log "INVALID HELP MESSAGE" mreply
        Nothing ->
            case botHelp Nothing $ commandList componentJid Nothing componentJid from [] of
                Just msg -> sendMessage msg
                Nothing -> log "INVALID HELP MESSAGE" ()
-- | Dispatch the user's message to a command and drive its adhoc-command
--   session to completion: match the body against unique prefixes of command
--   nodes (or the "register <jid>" shorthand), execute the command, walk any
--   forms/notes/actions it returns, and fall back to help when nothing
--   matches. NOTE(review): "{}command" / "{}actions" / "{}note" literals look
--   like scrubbed "{http://jabber.org/protocol/commands}" namespaces; confirm.
adhocBotRunCommand :: (UIO.Unexceptional m) => DB.DB -> JID -> JID -> (XMPP.Message -> m ()) -> (XMPP.IQ -> UIO.UIO (STM (Maybe XMPP.IQ))) -> STM XMPP.Message -> JID -> Text -> [Element] -> m ()
adhocBotRunCommand db componentJid routeFrom sendMessage sendIQ getMessage from body cmdEls = do
    let (nodes, cmds) = unzip $ mapMaybe (\el -> (,) <$> attributeText (s"node") el <*> pure el) cmdEls
    case (snd <$> find (\(prefixes, _) -> Set.member (CI.mk body) prefixes) (zip (uniquePrefix nodes) cmds), registerShorthand body) of
        -- "register <jid>": start the configure-route command and pre-fill the
        -- gateway-jid field so the user skips that question.
        (_, Just gatewayJID) -> do
            mResult <- (atomicUIO =<<) $ UIO.lift $ sendIQ $ (emptyIQ IQSet) {
                    iqFrom = Just routeFrom,
                    iqTo = Just componentJid,
                    iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText ConfigureDirectMessageRoute.nodeName])] []
                }
            case attributeText (s"sessionid") =<< iqPayload =<< mResult of
                Just sessionid ->
                    startWithIntro $ (emptyIQ IQSet) {
                        iqFrom = Just routeFrom,
                        iqTo = iqFrom =<< mResult,
                        iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText ConfigureDirectMessageRoute.nodeName]), (s"sessionid", [ContentText sessionid]), (s"action", [ContentText $ s"next"])] [
                            NodeElement $ Element (fromString "{jabber:x:data}x") [
                                (fromString "{jabber:x:data}type", [ContentText $ s"submit"])
                            ] [
                                NodeElement $ Element (fromString "{jabber:x:data}field") [
                                    (fromString "{jabber:x:data}type", [ContentText $ s"jid-single"]),
                                    (fromString "{jabber:x:data}var", [ContentText $ s"gateway-jid"])
                                ] [
                                    NodeElement $ Element (fromString "{jabber:x:data}value") [] [NodeContent $ ContentText $ formatJID gatewayJID]
                                ]
                            ]
                        ]
                    }
                Nothing -> sendHelp db componentJid sendMessage sendIQ from routeFrom
        -- A known command name (by unique prefix): execute it.
        (Just cmd, Nothing) ->
            startWithIntro $ (emptyIQ IQSet) {
                iqFrom = Just routeFrom,
                iqTo = parseJID =<< attributeText (s"jid") cmd,
                iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText $ fromMaybe mempty $ attributeText (s"node") cmd])] []
            }
        (Nothing, Nothing) -> sendHelp db componentJid sendMessage sendIQ from routeFrom
    where
    -- First step of a session: also explain the next/cancel conventions.
    startWithIntro cmdIQ =
        sendAndRespondTo (Just $ intercalate (s"\n") [
            s"You can leave something at the current value by saying 'next'.",
            s"You can return to the main menu by saying 'cancel' at any time."
        ]) cmdIQ
    -- Attach the command session id as a message thread, when known.
    threadedMessage Nothing msg = msg
    threadedMessage (Just sessionid) msg = msg { messagePayloads = (Element (s"thread") [] [NodeContent $ ContentText sessionid]) : messagePayloads msg }
    -- If the command is still executing, advance it with its default action
    -- and the given response body; otherwise do nothing.
    continueExecution resultIQ responseBody
        | Just payload <- iqPayload resultIQ,
          Just sessionid <- attributeText (s"sessionid") payload,
          Just "executing" <- T.unpack <$> attributeText (s"status") payload = do
            let actions = listToMaybe $ isNamed(s"{}actions") =<< elementChildren payload
            -- The standard says if actions is present, with no "execute" attribute, that the default is "next"
            -- But if there is no actions, the default is "execute"
            let defaultAction = maybe (s"execute") (fromMaybe (s"next") . attributeText (s"execute")) actions
            let cmdIQ' = (emptyIQ IQSet) {
                    iqFrom = Just routeFrom,
                    iqTo = iqFrom resultIQ,
                    iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText $ fromMaybe mempty $ attributeText (s"node") payload]), (s"sessionid", [ContentText sessionid]), (s"action", [ContentText defaultAction])] responseBody
                }
            sendAndRespondTo Nothing cmdIQ'
    continueExecution _ _ = return ()
    -- Send one command IQ and react to its result: result forms are rendered,
    -- fillable forms are walked interactively, notes are relayed, and flow
    -- actions are offered; errors and timeouts are reported to the user.
    sendAndRespondTo intro cmdIQ = do
        mcmdResult <- atomicUIO =<< UIO.lift (sendIQ cmdIQ)
        case mcmdResult of
            Just resultIQ
                -- A result-type form: just render it, then keep going.
                | IQResult == iqType resultIQ,
                  Just payload <- iqPayload resultIQ,
                  [form] <- isNamed (s"{jabber:x:data}x") =<< elementChildren payload,
                  attributeText (s"type") form == Just (s"result") -> do
                    let sendText = sendMessage . threadedMessage (attributeText (s"sessionid") payload) . mkSMS componentJid from
                    sendText $ renderResultForm form
                    continueExecution resultIQ []
                -- A fillable form: walk it with the user, with cancel support.
                | IQResult == iqType resultIQ,
                  Just payload <- iqPayload resultIQ,
                  Just sessionid <- attributeText (s"sessionid") payload,
                  Just cmd <- attributeText (s"node") payload,
                  [form] <- isNamed (s"{jabber:x:data}x") =<< elementChildren payload -> do
                    let cancelIQ = (emptyIQ IQSet) {
                            iqFrom = Just routeFrom,
                            iqTo = iqFrom resultIQ,
                            iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText cmd]), (s"sessionid", [ContentText sessionid]), (s"action", [ContentText $ s"cancel"])] []
                        }
                    let cancel = void . atomicUIO =<< UIO.lift (sendIQ cancelIQ)
                    let sendText = sendMessage . threadedMessage (Just sessionid) . mkSMS componentJid from
                    let cancelText = sendText . ((cmd ++ s" ") ++)
                    forM_ intro sendText
                    returnForm <- adhocBotAnswerForm sendText (withCancel sessionLifespan cancelText cancel getMessage) form
                    continueExecution resultIQ [NodeElement returnForm]
                -- Notes: relay them; if still executing, offer the actions.
                | IQResult == iqType resultIQ,
                  Just payload <- iqPayload resultIQ,
                  notes@(_:_) <- isNamed (s"{}note") =<< elementChildren payload -> do
                    let sendText = sendMessage . threadedMessage (attributeText (s"sessionid") payload) . mkSMS componentJid from
                    forM_ notes $
                        sendText . mconcat . elementText
                    if (attributeText (s"status") payload == Just (s"executing")) then do
                        let actions = mapMaybe (actionFromXMPP . XML.nameLocalName . elementName) $ elementChildren =<< isNamed (s"{}actions") =<< elementChildren payload
                        let sessionid = maybe [] (\sessid -> [(s"sessionid", [ContentText sessid])]) $ attributeText (s"sessionid") payload
                        sendText $
                            s"You can say one of: " ++
                            (intercalate (s", ") $ map actionCmd (ActionCancel : actions))
                        action <- waitForAction actions sendText (atomicUIO getMessage)
                        let cmdIQ' = (emptyIQ IQSet) {
                                iqFrom = Just routeFrom,
                                iqTo = iqFrom resultIQ,
                                iqPayload = Just $ Element (s"{}command") ([(s"node", [ContentText $ fromMaybe mempty $ attributeText (s"node") payload]), (s"action", [actionContent action])] ++ sessionid) []
                            }
                        sendAndRespondTo Nothing cmdIQ'
                    -- After a successful backend registration, re-show help so
                    -- the user sees their new command list.
                    else when (
                        attributeText (s"status") payload == Just (s"completed") &&
                        attributeText (s"node") payload == Just ConfigureDirectMessageRoute.nodeName &&
                        all (\n -> attributeText (s"type") n /= Just (s"error")) notes
                    ) $
                        sendHelp db componentJid sendMessage sendIQ from routeFrom
                -- Completed/canceled with nothing to show: silently finish.
                | IQResult == iqType resultIQ,
                  [cmd] <- isNamed (s"{}command") =<< (justZ $ iqPayload resultIQ),
                  attributeText (s"status") cmd `elem` [Just (s"completed"), Just (s"canceled")] -> return ()
                | otherwise -> do
                    log "COMMAND ERROR" resultIQ
                    sendMessage $ mkSMS componentJid from (s"Command error")
            Nothing -> sendMessage $ mkSMS componentJid from (s"Command timed out")
-- | Entry point for one inbound chat message to the bot. Builds the
--   per-sender route address, fetches the backend's command list when the
--   sender has a direct-message route, and hands off to adhocBotRunCommand;
--   bodiless messages get the help text. Messages without a from are logged.
adhocBotSession :: (UIO.Unexceptional m) => DB.DB -> JID -> (XMPP.Message -> m ()) -> (XMPP.IQ -> UIO.UIO (STM (Maybe XMPP.IQ))) -> STM XMPP.Message -> XMPP.Message-> m ()
adhocBotSession db componentJid sendMessage sendIQ getMessage message@(XMPP.Message { XMPP.messageFrom = Just from })
    | Just body <- getBody "jabber:component:accept" message = do
        maybeRoute <- (parseJID =<<) . (join . hush) <$> UIO.fromIO (DB.get db (DB.byJid from ["direct-message-route"]))
        case maybeRoute of
            Just route -> do
                mreply <- atomicUIO =<< (UIO.lift . sendIQ) (queryCommandList' route routeFrom)
                case iqPayload =<< mfilter ((==IQResult) . iqType) mreply of
                    Just reply -> adhocBotRunCommand db componentJid routeFrom sendMessage' sendIQ getMessage from body $ maskCommands componentJid $ elementChildren reply ++ internalCommands
                    Nothing -> adhocBotRunCommand db componentJid routeFrom sendMessage' sendIQ getMessage from body internalCommands
            Nothing -> adhocBotRunCommand db componentJid routeFrom sendMessage' sendIQ getMessage from body internalCommands
    | otherwise = sendHelp db componentJid sendMessage' sendIQ from routeFrom
    where
    -- Our built-in commands, extracted from our own commandList payload.
    internalCommands = elementChildren =<< maybeToList (iqPayload $ commandList componentJid Nothing componentJid from [])
    -- Per-sender resource on the component used as the IQ "from" for sessions.
    Just routeFrom = parseJID $ escapeJid (bareTxt from) ++ s"@" ++ formatJID componentJid ++ s"/adhocbot"
    -- Every outgoing message is tagged with an origin-id first.
    sendMessage' = sendMessage <=< addOriginUUID
adhocBotSession _ _ _ _ _ m = log "BAD ADHOC BOT MESSAGE" m
| null | https://raw.githubusercontent.com/singpolyma/cheogram/a08cca4dc86f39687afcbf082a6d2c2d10776e56/Adhoc.hs | haskell | Technically an option can have multiple values, but in practice it probably won't
The spec says a field type we don't understand should be treated as text-single
The default if a type isn't specified is text-single
The standard says if actions is present, with no "execute" attribute, that the default is "next"
But if there is no actions, the default is "execute" | module Adhoc (adhocBotSession, commandList, queryCommandList) where
import Prelude ()
import BasicPrelude hiding (log)
import Control.Concurrent (myThreadId, killThread)
import Control.Concurrent.STM
import Control.Error (hush, ExceptT, runExceptT, throwE, justZ)
import Data.XML.Types as XML (Element(..), Node(NodeContent, NodeElement), Content(ContentText), isNamed, elementText, elementChildren, attributeText)
import qualified Data.XML.Types as XML
import Network.Protocol.XMPP (JID(..), parseJID, formatJID, IQ(..), IQType(..), emptyIQ, Message(..))
import qualified Network.Protocol.XMPP as XMPP
import qualified Data.CaseInsensitive as CI
import qualified Control.Concurrent.STM.Delay as Delay
import qualified Data.Attoparsec.Text as Atto
import qualified Data.Bool.HT as HT
import qualified Data.Set as Set
import qualified Data.Text as T
import qualified Data.UUID as UUID ( toString, toText )
import qualified Data.UUID.V1 as UUID ( nextUUID )
import qualified UnexceptionalIO.Trans ()
import qualified UnexceptionalIO as UIO
import CommandAction
import StanzaRec
import UniquePrefix
import Util
import qualified ConfigureDirectMessageRoute
import qualified JidSwitch
import qualified DB
-- | Maximum lifetime of an adhoc command session: one hour, in microseconds.
sessionLifespan :: Int
sessionLifespan = 60 * 60 * seconds
    where
    seconds = 1000000
-- | Tag a message with a fresh origin-id (XEP-0359); untagged if no UUID.
addOriginUUID :: (UIO.Unexceptional m) => XMPP.Message -> m XMPP.Message
addOriginUUID msg = maybe msg (addTag msg) <$> fromIO_ UUID.nextUUID
    where
    addTag msg uuid = msg { messagePayloads = Element (s"{urn:xmpp:sid:0}origin-id") [(s"id", [ContentText $ UUID.toText uuid])] [] : messagePayloads msg }
-- | Render a command-list IQ result as a plain-text help message.
botHelp :: Maybe Text -> IQ -> Maybe Message
botHelp header (IQ { iqTo = Just to, iqFrom = Just from, iqPayload = Just payload }) =
    Just $ mkSMS from to $ maybe mempty (++ s"\n") header ++ (s"Help:\n\t") ++ intercalate (s"\n\t") (map (\item ->
        fromMaybe mempty (attributeText (s"node") item) ++ s": " ++
        fromMaybe mempty (attributeText (s"name") item)
    ) items)
    where
    items = isNamed (s"{#items}item") =<< elementChildren payload
botHelp _ _ = Nothing
-- This replaces certain commands that the SGX supports with our sugared versions
-- | Swap the backend's switch-backend command item for our JID-switch item.
maskCommands :: XMPP.JID -> [Element] -> [Element]
maskCommands componentJid = map (\el ->
        if attributeText (s"node") el == Just ConfigureDirectMessageRoute.switchBackendNodeName then
            Element (s"{#items}item") [
                (s"jid", [ContentText $ formatJID componentJid ++ s"/CHEOGRAM%" ++ JidSwitch.nodeName]),
                (s"node", [ContentText JidSwitch.nodeName]),
                (s"name", [ContentText $ s"Change your Jabber ID"])
            ] []
        else
            el
    )
-- | Build the disco#items result advertising backend plus built-in commands.
commandList :: JID -> Maybe Text -> JID -> JID -> [Element] -> IQ
commandList componentJid qid from to extras =
    (emptyIQ IQResult) {
        iqTo = Just to,
        iqFrom = Just from,
        iqID = qid,
        iqPayload = Just $ Element (s"{#items}query")
            [(s"{#items}node", [ContentText $ s""])]
            (extraItems ++ [
                NodeElement $ Element (s"{#items}item") [
                    (s"jid", [ContentText $ formatJID componentJid ++ s"/CHEOGRAM%" ++ ConfigureDirectMessageRoute.nodeName]),
                    (s"node", [ContentText $ ConfigureDirectMessageRoute.nodeName]),
                    (s"name", [ContentText $ s"Register with backend"])
                ] []
            ])
    }
    where
    -- Retarget advertised items at this component; drop jabber:iq:register.
    extraItems = map NodeElement $ maskCommands componentJid $ map (\el ->
            el {
                elementAttributes = map (\(aname, acontent) ->
                    if aname == s"{#items}jid" || aname == s"jid" then
                        (aname, [ContentText $ formatJID componentJid])
                    else
                        (aname, acontent)
                ) (elementAttributes el)
            }
        ) $ filter (\el ->
            attributeText (s"node") el /= Just (s"jabber:iq:register")
        ) extras
-- | Allow "next" to skip an optional field, keeping its existing value.
withNext :: (UIO.Unexceptional m) =>
    m XMPP.Message
    -> Element
    -> (ExceptT [Element] m XMPP.Message -> ExceptT [Element] m [Element])
    -> m [Element]
withNext getMessage field answerField
    | isRequired field && T.null (mconcat $ fieldValue field) = do
        either return return =<< runExceptT (answerField $ lift getMessage)
    | otherwise =
        either return return =<< runExceptT (answerField suspension)
    where
    suspension = do
        m <- lift getMessage
        if fmap CI.mk (getBody (s"jabber:component:accept") m) == Just (s"next") then
            throwE [field]
        else
            return m
-- | Wrap a message source with session expiry and a "cancel" escape hatch.
withCancel :: (UIO.Unexceptional m) => Int -> (Text -> m ()) -> m () -> STM XMPP.Message -> m XMPP.Message
withCancel sessionLength sendText cancelSession getMessage = do
    delay <- fromIO_ $ Delay.newDelay sessionLength
    maybeMsg <- atomicUIO $
        (Delay.waitDelay delay *> pure Nothing)
        <|>
        Just <$> getMessage
    case maybeMsg of
        Just msg
            | (CI.mk <$> getBody (s"jabber:component:accept") msg) == Just (s"cancel") -> cancel $ s"cancelled"
        Just msg -> return msg
        Nothing -> cancel $ s"expired"
    where
    cancel t = do
        sendText t
        cancelSession
        fromIO_ $ myThreadId >>= killThread
        return $ error "Unreachable"
-- | Build the command-list query stanza with a fresh UUID as IQ id.
queryCommandList :: JID -> JID -> IO [StanzaRec]
queryCommandList to from = do
    uuid <- (fmap.fmap) (fromString . UUID.toString) UUID.nextUUID
    return [mkStanzaRec $ (queryCommandList' to from) {iqID = uuid}]
-- | Keep fetching messages until one's body parses; run onFail before retries.
untilParse :: (UIO.Unexceptional m) => m Message -> m () -> (Text -> Maybe b) -> m b
untilParse getText onFail parser = do
    text <- (fromMaybe mempty . getBody "jabber:component:accept") <$> getText
    maybe parseFail return $ parser text
    where
    parseFail = do
        onFail
        untilParse getText onFail parser
-- | Field heading: label (or title-cased var), current value, description.
formatLabel :: (Text -> Maybe Text) -> Element -> Text
formatLabel valueFormatter field = lbl ++ value ++ descSuffix
    where
    lbl = maybe (fromMaybe mempty $ attributeText (s"var") field) T.toTitle $ label field
    value = maybe mempty (\v -> s" [Current value " ++ v ++ s"]") $ valueFormatter <=< mfilter (not . T.null) $ Just $ intercalate (s", ") (fieldValue field)
    descSuffix = maybe mempty (\dsc -> s"\n(" ++ dsc ++ s")") $ desc field
-- | Display-only "fixed" field: show label and values, collect nothing.
adhocBotAnswerFixed :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerFixed sendText _getMessage field = do
    let values = fmap (mconcat . elementText) $ isNamed (s"{jabber:x:data}value") =<< elementChildren field
    sendText $ formatLabel (const Nothing) field
    sendText $ unlines values
    return []
adhocBotAnswerBoolean :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerBoolean sendText getMessage field = do
case attributeText (s"var") field of
Just var -> do
sendText $ formatLabel (fmap formatBool . hush . Atto.parseOnly parser) field ++ s"\nYes or No?"
value <- untilParse getMessage (sendText helperText) $ hush . Atto.parseOnly parser
return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ HT.if' value (s"true") (s"false")]
]]
_ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
where
helperText = s"I didn't understand your answer. Please send yes or no"
parser = Atto.skipMany Atto.space *> (
(True <$ Atto.choice (Atto.asciiCI <$> [s"true", s"t", s"1", s"yes", s"y", s"enable", s"enabled"])) <|>
(False <$ Atto.choice (Atto.asciiCI <$> [s"false", s"f", s"0", s"no", s"n", s"disable", s"disabled"]))
) <* Atto.skipMany Atto.space <* Atto.endOfInput
formatBool True = s"Yes"
formatBool False = s"No"
adhocBotAnswerTextSingle :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerTextSingle sendText getMessage field = do
case attributeText (s"var") field of
Just var -> do
sendText $ s"Enter " ++ formatLabel Just field
value <- getMessage
case getBody "jabber:component:accept" value of
Just body -> return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText body]
]]
Nothing -> return []
_ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
listOptionText :: (Foldable t) => t Text -> Text -> (Int, Element) -> Text
listOptionText currentValues currentValueText (n, v) = tshow n ++ s". " ++ optionText v ++ selectedText v
where
selectedText option
| mconcat (fieldValue option) `elem` currentValues = currentValueText
| otherwise = mempty
adhocBotAnswerJidSingle :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerJidSingle sendText getMessage field = do
case attributeText (s"var") field of
Just var -> do
sendText $ s"Enter " ++ formatLabel Just field
value <- untilParse getMessage (sendText helperText) XMPP.parseJID
return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ formatJID value]
]]
_ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
where
helperText = s"I didn't understand your answer. Please send only a valid JID like or perhaps just example.com"
adhocBotAnswerListMulti :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerListMulti sendText getMessage field = do
case attributeText (s"var") field of
Just var -> do
let options = zip [1..] $ isNamed(s"{jabber:x:data}option") =<< elementChildren field
let currentValues = fieldValue field
let optionsText = fmap (listOptionText currentValues (s" [Currently Selected]")) options
sendText $ unlines $ [formatLabel (const Nothing) field] ++ optionsText ++ [s"Which numbers?"]
values <- untilParse getMessage (sendText helperText) (hush . Atto.parseOnly parser)
let selectedOptions = fmap snd $ filter (\(x, _) -> x `elem` values) options
return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] $ flip fmap selectedOptions $ \option ->
NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ mconcat $ fieldValue option]
]
_ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
where
parser = Atto.skipMany Atto.space *> Atto.sepBy Atto.decimal (Atto.skipMany $ Atto.choice [Atto.space, Atto.char ',']) <* Atto.skipMany Atto.space <* Atto.endOfInput
helperText = s"I didn't understand your answer. Please send the numbers you want, separated by commas or spaces like \"1, 3\" or \"1 3\". Blank (or just spaces) to pick nothing."
data ListSingle = ListSingle { listLabel :: T.Text, listIsOpen :: Bool, listLookup :: (Int -> Maybe [T.Text]) }
listSingleHelper :: Element -> ListSingle
listSingleHelper field = ListSingle label open lookup
where
open = isOpenValidation (fieldValidation field)
options = zip [1..] $ isNamed(s"{jabber:x:data}option") =<< elementChildren field
currentValue = listToMaybe $ elementText =<< isNamed(s"{jabber:x:data}value") =<< elementChildren field
optionsText = fmap (listOptionText currentValue (s" [Current Value]")) options
currentValueText = (\func -> maybe [] func currentValue) $ \value ->
if open && value `notElem` (map (mconcat . fieldValue . snd) options) then
[s"[Current Value: " ++ value ++ s"]"]
else
[]
label = unlines $ [formatLabel (const Nothing) field] ++ optionsText ++ currentValueText
lookup n = fmap (fieldValue . snd) $ find (\(x, _) -> x == n) options
adhocBotAnswerListSingle :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m [Element]
adhocBotAnswerListSingle sendText getMessage field = do
case attributeText (s"var") field of
Just var -> do
let list = listSingleHelper field
let open = listIsOpen list
let prompt = s"Please enter a number from the list above" ++ if open then s", or enter a custom option" else s""
sendText $ listLabel list ++ prompt
maybeOption <- if open then do
value <- untilParse getMessage (sendText helperText) (hush . Atto.parseOnly openParser)
return $ Just $ case value of
Left openValue -> [openValue]
Right itemNumber -> maybe ([tshow itemNumber]) id $ listLookup list itemNumber
else do
value <- untilParse getMessage (sendText helperText) (hush . Atto.parseOnly parser)
return $ listLookup list value
case maybeOption of
Just option -> return [Element (s"{jabber:x:data}field") [(s"var", [ContentText var])] [
NodeElement $ Element (s"{jabber:x:data}value") [] [NodeContent $ ContentText $ mconcat option]
]]
Nothing -> do
sendText $ s"Please pick one of the given options"
adhocBotAnswerListSingle sendText getMessage field
_ -> log "ADHOC BOT FIELD WITHOUT VAR" field >> return []
where
helperText = s"I didn't understand your answer. Please just send the number of the one item you want to pick, like \"1\""
parser = Atto.skipMany Atto.space *> Atto.decimal <* Atto.skipMany Atto.space <* Atto.endOfInput
openParser = (Right <$> parser) <|> (Left <$> Atto.takeText)
adhocBotAnswerForm :: (UIO.Unexceptional m) => (Text -> m ()) -> m XMPP.Message -> Element -> m Element
adhocBotAnswerForm sendText getMessage form = do
fields <- forM (filter (uncurry (||) . (isField &&& isInstructions)) $ elementChildren form) $ \field ->
let sendText' = lift . sendText in
withNext getMessage field $ \getMessage' ->
HT.select (
log "ADHOC BOT UNKNOWN FIELD" field >>
adhocBotAnswerTextSingle sendText' getMessage' field
) [
(isInstructions field,
sendText' (mconcat $ elementText field) >> return []),
(attributeText (s"type") field == Just (s"list-single"),
adhocBotAnswerListSingle sendText' getMessage' field),
(attributeText (s"type") field == Just (s"list-multi"),
adhocBotAnswerListMulti sendText' getMessage' field),
(attributeText (s"type") field == Just (s"jid-single"),
adhocBotAnswerJidSingle sendText' getMessage' field),
(attributeText (s"type") field == Just (s"hidden"),
return [field]),
(attributeText (s"type") field == Just (s"fixed"),
adhocBotAnswerFixed sendText' getMessage' field),
(attributeText (s"type") field == Just (s"boolean"),
adhocBotAnswerBoolean sendText' getMessage' field),
(attributeText (s"type") field `elem` [Just (s"text-single"), Nothing],
adhocBotAnswerTextSingle sendText' getMessage' field)
]
return $ Element (s"{jabber:x:data}x") [(s"type", [ContentText $ s"submit"])] $ NodeElement <$> mconcat fields
formatReported :: Element -> (Text, [Text])
formatReported =
first (intercalate (s"\t")) . unzip .
map (\field ->
(
formatLabel (const Nothing) field,
fromMaybe mempty (attributeText (s"var") field)
)
) . filter isField . elementChildren
formatItem :: [Text] -> Element -> Text
formatItem reportedVars item = intercalate (s"\t") $ map (\var ->
intercalate (s", ") $ findFieldValue var
) reportedVars
where
findFieldValue var = maybe [] fieldValue $ find (\field ->
attributeText (s"var") field == Just var
) fields
fields = filter isField $ elementChildren item
simpleResultField :: Text -> [Text] -> Text
simpleResultField label [value] = concat [label, s": ", value, s"\n"]
simpleResultField label values = concat [label, s":\n", unlines values]
formatResultField :: Element -> Text
formatResultField el = case maybe ("text-single") T.unpack $ attributeText (s"type") el of
"hidden" -> mempty
"list-single" -> listLabel $ listSingleHelper el
"jid-single" -> simpleResultField label $ map (s"xmpp:" ++) $ fieldValue el
_ -> simpleResultField label $ fieldValue el
where
label = formatLabel (const Nothing) el
renderResultForm :: Element -> Text
renderResultForm form =
intercalate (s"\n") $ catMaybes $ snd $
forAccumL [] (elementChildren form) $ \reportedVars el ->
HT.select (reportedVars, Nothing) $ map (second $ second Just) [
(isInstructions el, (reportedVars,
mconcat $ elementText el)),
(isField el, (reportedVars, formatResultField el)),
(isReported el,
swap $ formatReported el),
(isItem el, (reportedVars,
formatItem reportedVars el))
]
where
forAccumL z xs f = mapAccumL f z xs
waitForAction :: (UIO.Unexceptional m) => [Action] -> (Text -> m ()) -> m XMPP.Message -> m Action
waitForAction actions sendText getMessage = do
m <- getMessage
let ciBody = CI.mk <$> getBody (s"jabber:component:accept") m
HT.select whatWasThat [
(ciBody == Just (s"next"), return ActionNext),
(ciBody == Just (s"back"), return ActionPrev),
(ciBody == Just (s"cancel"), return ActionCancel),
(ciBody == Just (s"finish"), return ActionComplete)
]
where
allowedCmds = map actionCmd (ActionCancel : actions)
whatWasThat = do
sendText $
s"I didn't understand that. You can say one of: " ++
intercalate (s", ") allowedCmds
waitForAction actions sendText getMessage
label :: Element -> Maybe Text
label = attributeText (s"label")
optionText :: Element -> Text
optionText element = fromMaybe (mconcat $ fieldValue element) (label element)
fieldValue :: Element -> [Text]
fieldValue = fmap (mconcat . elementText) .
isNamed (s"{jabber:x:data}value") <=< elementChildren
fieldValidation :: Element -> Maybe Element
fieldValidation =
listToMaybe .
(isNamed (s"{-validate}validate") <=< elementChildren)
isOpenValidation :: Maybe Element -> Bool
isOpenValidation (Just el) =
not $ null $
isNamed (s"{-validate}open")
=<< elementChildren el
isOpenValidation _ = False
desc :: Element -> Maybe Text
desc = mfilter (not . T.null) . Just . mconcat .
(elementText <=< isNamed(s"{jabber:x:data}desc") <=< elementChildren)
isField :: Element -> Bool
isField el = elementName el == s"{jabber:x:data}field"
isInstructions :: Element -> Bool
isInstructions el = elementName el == s"{jabber:x:data}instructions"
isReported :: Element -> Bool
isReported el = elementName el == s"{jabber:x:data}reported"
isItem :: Element -> Bool
isItem el = elementName el == s"{jabber:x:data}item"
isRequired :: Element -> Bool
isRequired = not . null . (isNamed (s"{jabber:x:data}required") <=< elementChildren)
registerShorthand :: Text -> Maybe JID
registerShorthand body = do
gatewayJID <- hush $ Atto.parseOnly (Atto.asciiCI (s"register") *> Atto.many1 Atto.space *> Atto.takeText) body
parseJID gatewayJID
getServerInfoForm :: [XML.Element] -> Maybe XML.Element
getServerInfoForm = find (\el ->
attributeText (s"type") el == Just (s"result") &&
getFormField el (s"FORM_TYPE") == Just (s"")
) . (isNamed (s"{jabber:x:data}x") =<<)
sendHelp :: (UIO.Unexceptional m) =>
DB.DB
-> JID
-> (XMPP.Message -> m ())
-> (XMPP.IQ -> UIO.UIO (STM (Maybe XMPP.IQ)))
-> JID
-> JID
-> m ()
sendHelp db componentJid sendMessage sendIQ from routeFrom = do
maybeRoute <- (parseJID =<<) . (join . hush) <$> UIO.fromIO (DB.get db (DB.byJid from ["direct-message-route"]))
case maybeRoute of
Just route -> do
replySTM <- UIO.lift $ sendIQ $ queryCommandList' route routeFrom
discoInfoSTM <- UIO.lift $ sendIQ $ queryDiscoWithNode' Nothing route routeFrom
(mreply, mDiscoInfo) <- atomicUIO $ (,) <$> replySTM <*> discoInfoSTM
let helpMessage = botHelp
(renderResultForm <$> (getServerInfoForm . elementChildren =<< iqPayload =<< mDiscoInfo)) $
commandList componentJid Nothing componentJid from $
isNamed (s"{#items}item") =<< elementChildren =<< maybeToList (XMPP.iqPayload =<< mfilter ((== XMPP.IQResult) . XMPP.iqType) mreply)
case helpMessage of
Just msg -> sendMessage msg
Nothing -> log "INVALID HELP MESSAGE" mreply
Nothing ->
case botHelp Nothing $ commandList componentJid Nothing componentJid from [] of
Just msg -> sendMessage msg
Nothing -> log "INVALID HELP MESSAGE" ()
adhocBotRunCommand :: (UIO.Unexceptional m) => DB.DB -> JID -> JID -> (XMPP.Message -> m ()) -> (XMPP.IQ -> UIO.UIO (STM (Maybe XMPP.IQ))) -> STM XMPP.Message -> JID -> Text -> [Element] -> m ()
adhocBotRunCommand db componentJid routeFrom sendMessage sendIQ getMessage from body cmdEls = do
let (nodes, cmds) = unzip $ mapMaybe (\el -> (,) <$> attributeText (s"node") el <*> pure el) cmdEls
case (snd <$> find (\(prefixes, _) -> Set.member (CI.mk body) prefixes) (zip (uniquePrefix nodes) cmds), registerShorthand body) of
(_, Just gatewayJID) -> do
mResult <- (atomicUIO =<<) $ UIO.lift $ sendIQ $ (emptyIQ IQSet) {
iqFrom = Just routeFrom,
iqTo = Just componentJid,
iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText ConfigureDirectMessageRoute.nodeName])] []
}
case attributeText (s"sessionid") =<< iqPayload =<< mResult of
Just sessionid ->
startWithIntro $ (emptyIQ IQSet) {
iqFrom = Just routeFrom,
iqTo = iqFrom =<< mResult,
iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText ConfigureDirectMessageRoute.nodeName]), (s"sessionid", [ContentText sessionid]), (s"action", [ContentText $ s"next"])] [
NodeElement $ Element (fromString "{jabber:x:data}x") [
(fromString "{jabber:x:data}type", [ContentText $ s"submit"])
] [
NodeElement $ Element (fromString "{jabber:x:data}field") [
(fromString "{jabber:x:data}type", [ContentText $ s"jid-single"]),
(fromString "{jabber:x:data}var", [ContentText $ s"gateway-jid"])
] [
NodeElement $ Element (fromString "{jabber:x:data}value") [] [NodeContent $ ContentText $ formatJID gatewayJID]
]
]
]
}
Nothing -> sendHelp db componentJid sendMessage sendIQ from routeFrom
(Just cmd, Nothing) ->
startWithIntro $ (emptyIQ IQSet) {
iqFrom = Just routeFrom,
iqTo = parseJID =<< attributeText (s"jid") cmd,
iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText $ fromMaybe mempty $ attributeText (s"node") cmd])] []
}
(Nothing, Nothing) -> sendHelp db componentJid sendMessage sendIQ from routeFrom
where
startWithIntro cmdIQ =
sendAndRespondTo (Just $ intercalate (s"\n") [
s"You can leave something at the current value by saying 'next'.",
s"You can return to the main menu by saying 'cancel' at any time."
]) cmdIQ
threadedMessage Nothing msg = msg
threadedMessage (Just sessionid) msg = msg { messagePayloads = (Element (s"thread") [] [NodeContent $ ContentText sessionid]) : messagePayloads msg }
continueExecution resultIQ responseBody
| Just payload <- iqPayload resultIQ,
Just sessionid <- attributeText (s"sessionid") payload,
Just "executing" <- T.unpack <$> attributeText (s"status") payload = do
let actions = listToMaybe $ isNamed(s"{}actions") =<< elementChildren payload
let defaultAction = maybe (s"execute") (fromMaybe (s"next") . attributeText (s"execute")) actions
let cmdIQ' = (emptyIQ IQSet) {
iqFrom = Just routeFrom,
iqTo = iqFrom resultIQ,
iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText $ fromMaybe mempty $ attributeText (s"node") payload]), (s"sessionid", [ContentText sessionid]), (s"action", [ContentText defaultAction])] responseBody
}
sendAndRespondTo Nothing cmdIQ'
continueExecution _ _ = return ()
sendAndRespondTo intro cmdIQ = do
mcmdResult <- atomicUIO =<< UIO.lift (sendIQ cmdIQ)
case mcmdResult of
Just resultIQ
| IQResult == iqType resultIQ,
Just payload <- iqPayload resultIQ,
[form] <- isNamed (s"{jabber:x:data}x") =<< elementChildren payload,
attributeText (s"type") form == Just (s"result") -> do
let sendText = sendMessage . threadedMessage (attributeText (s"sessionid") payload) . mkSMS componentJid from
sendText $ renderResultForm form
continueExecution resultIQ []
| IQResult == iqType resultIQ,
Just payload <- iqPayload resultIQ,
Just sessionid <- attributeText (s"sessionid") payload,
Just cmd <- attributeText (s"node") payload,
[form] <- isNamed (s"{jabber:x:data}x") =<< elementChildren payload -> do
let cancelIQ = (emptyIQ IQSet) {
iqFrom = Just routeFrom,
iqTo = iqFrom resultIQ,
iqPayload = Just $ Element (s"{}command") [(s"node", [ContentText cmd]), (s"sessionid", [ContentText sessionid]), (s"action", [ContentText $ s"cancel"])] []
}
let cancel = void . atomicUIO =<< UIO.lift (sendIQ cancelIQ)
let sendText = sendMessage . threadedMessage (Just sessionid) . mkSMS componentJid from
let cancelText = sendText . ((cmd ++ s" ") ++)
forM_ intro sendText
returnForm <- adhocBotAnswerForm sendText (withCancel sessionLifespan cancelText cancel getMessage) form
continueExecution resultIQ [NodeElement returnForm]
| IQResult == iqType resultIQ,
Just payload <- iqPayload resultIQ,
notes@(_:_) <- isNamed (s"{}note") =<< elementChildren payload -> do
let sendText = sendMessage . threadedMessage (attributeText (s"sessionid") payload) . mkSMS componentJid from
forM_ notes $
sendText . mconcat . elementText
if (attributeText (s"status") payload == Just (s"executing")) then do
let actions = mapMaybe (actionFromXMPP . XML.nameLocalName . elementName) $ elementChildren =<< isNamed (s"{}actions") =<< elementChildren payload
let sessionid = maybe [] (\sessid -> [(s"sessionid", [ContentText sessid])]) $ attributeText (s"sessionid") payload
sendText $
s"You can say one of: " ++
(intercalate (s", ") $ map actionCmd (ActionCancel : actions))
action <- waitForAction actions sendText (atomicUIO getMessage)
let cmdIQ' = (emptyIQ IQSet) {
iqFrom = Just routeFrom,
iqTo = iqFrom resultIQ,
iqPayload = Just $ Element (s"{}command") ([(s"node", [ContentText $ fromMaybe mempty $ attributeText (s"node") payload]), (s"action", [actionContent action])] ++ sessionid) []
}
sendAndRespondTo Nothing cmdIQ'
else when (
attributeText (s"status") payload == Just (s"completed") &&
attributeText (s"node") payload == Just ConfigureDirectMessageRoute.nodeName &&
all (\n -> attributeText (s"type") n /= Just (s"error")) notes
) $
sendHelp db componentJid sendMessage sendIQ from routeFrom
| IQResult == iqType resultIQ,
[cmd] <- isNamed (s"{}command") =<< (justZ $ iqPayload resultIQ),
attributeText (s"status") cmd `elem` [Just (s"completed"), Just (s"canceled")] -> return ()
| otherwise -> do
log "COMMAND ERROR" resultIQ
sendMessage $ mkSMS componentJid from (s"Command error")
Nothing -> sendMessage $ mkSMS componentJid from (s"Command timed out")
adhocBotSession :: (UIO.Unexceptional m) => DB.DB -> JID -> (XMPP.Message -> m ()) -> (XMPP.IQ -> UIO.UIO (STM (Maybe XMPP.IQ))) -> STM XMPP.Message -> XMPP.Message-> m ()
adhocBotSession db componentJid sendMessage sendIQ getMessage message@(XMPP.Message { XMPP.messageFrom = Just from })
| Just body <- getBody "jabber:component:accept" message = do
maybeRoute <- (parseJID =<<) . (join . hush) <$> UIO.fromIO (DB.get db (DB.byJid from ["direct-message-route"]))
case maybeRoute of
Just route -> do
mreply <- atomicUIO =<< (UIO.lift . sendIQ) (queryCommandList' route routeFrom)
case iqPayload =<< mfilter ((==IQResult) . iqType) mreply of
Just reply -> adhocBotRunCommand db componentJid routeFrom sendMessage' sendIQ getMessage from body $ maskCommands componentJid $ elementChildren reply ++ internalCommands
Nothing -> adhocBotRunCommand db componentJid routeFrom sendMessage' sendIQ getMessage from body internalCommands
Nothing -> adhocBotRunCommand db componentJid routeFrom sendMessage' sendIQ getMessage from body internalCommands
| otherwise = sendHelp db componentJid sendMessage' sendIQ from routeFrom
where
internalCommands = elementChildren =<< maybeToList (iqPayload $ commandList componentJid Nothing componentJid from [])
Just routeFrom = parseJID $ escapeJid (bareTxt from) ++ s"@" ++ formatJID componentJid ++ s"/adhocbot"
sendMessage' = sendMessage <=< addOriginUUID
adhocBotSession _ _ _ _ _ m = log "BAD ADHOC BOT MESSAGE" m
|
4f36aee86d5a859b86afcc7c92247bab381cdd6413e886f488e309fe2b4c709f | Deducteam/Logipedia | environ.ml | open Ast
open Kernel.Basic
module Basic = Kernel.Basic
module Term = Kernel.Term
let package = ref ""
let set_package file =
let path = Filename.dirname file in
let sep = Filename.dir_sep in
assert (String.length sep = 1);
let sep = String.get sep 0 in
match List.rev @@ String.split_on_char sep path with
| [] -> failwith "Files should be in a directory (which is interpreted as a package)"
| x::_ -> package := x
let renaming = ref true
Use a list rather than a sec for
type proof_ctx = (string * _te) list
(* k counts lambas, used for renaming *)
type env =
{k: int; dk: Term.typed_context; ty: ty_ctx; te: te_ctx; prf: proof_ctx}
let empty_env = {k= 0; dk= []; ty= []; te= []; prf= []}
let soi = string_of_ident
let rec gen_fresh_rec ctx avoid x c =
let x' = if c < 0 then x else x ^ string_of_int c in
if List.exists (fun (_, v, _) -> soi v = x') ctx || List.mem x' avoid
then gen_fresh_rec ctx avoid x (c + 1)
else mk_ident x'
let gen_fresh env avoid x = gen_fresh_rec env.dk avoid (soi x) (-1)
let mk_ident = mk_ident
let string_of_ident = string_of_ident
let of_name name = (string_of_mident (md name), string_of_ident (id name))
let name_of cst = Basic.mk_name (Basic.mk_mident (fst cst)) (Basic.mk_ident (snd cst))
let add_ty_var env var =
let open Basic in
let open Sttfadk in
{ env with
k= env.k + 1
; ty= var :: env.ty
; dk=
(dloc, mk_ident var, Term.mk_Const dloc (mk_name sttfa_module sttfa_type))
:: env.dk }
let add_ty_var_dk env var =
add_ty_var env (soi var)
let add_te_var env var ty' =
let open Basic in
let ty = Decompile.decompile__type env.dk ty' in
let ty = Decompile.to__type ty in
{ env with
k = env.k + 1;
te= (var, ty') :: env.te; dk= (dloc, mk_ident var, ty) :: env.dk
}
let add_te_var_dk env var ty' = add_te_var env (soi var) ty'
let add_prf_ctx env id _te _te' =
{ env with
k= env.k + 1
; prf= (id, _te') :: env.prf
; dk= (Basic.dloc, mk_ident id, _te) :: env.dk }
let get_dk_var env n =
let _, x, _ = List.nth env.dk n in
soi x
let rec take i l =
if i = 0 then []
else match l with [] -> assert false | x :: l -> x :: take (i - 1) l
let rec drop i l =
if i = 0 then l
else match l with [] -> assert false | _ :: l -> drop (i - 1) l
module StringxType = struct
type t = string * _ty
let compare = Stdlib.compare
end
module TVars = Set . Make(StringxType )
module TVars = Set.Make(StringxType)
*)
module Vars = Set.Make(String)
let gen_fresh_set varlist v =
let rec gen_fresh_set_rec varlist v i =
let new_v = v^(string_of_int i) in
if List.mem new_v varlist then
gen_fresh_set_rec varlist v (i+1)
else new_v
in
gen_fresh_set_rec varlist v 0
let variables t =
let rec variables_rec set_var = function
TeVar(s) -> Vars.add s set_var
| Abs(v,_,t) -> variables_rec (Vars.add v set_var) t
| App(t1,t2) -> Vars.union (variables_rec set_var t1) (variables_rec set_var t2)
| Forall(v,_,t) -> variables_rec (Vars.add v set_var) t
| Impl(t1,t2) -> Vars.union (variables_rec set_var t1) (variables_rec set_var t2)
| AbsTy(_,t) -> variables_rec set_var t
| Cst _ -> set_var
in
variables_rec Vars.empty t
let benign_spec = function
| TeVar(v1),Forall(v2 , _ , _ ) when v1 = v2- > true
| _ - > false
let benign_spec = function
| TeVar(v1),Forall(v2,_,_) when v1=v2-> true
| _ -> false*)
let frees t =
let rec frees_rec set_var = function
TeVar(s) -> Vars.add s set_var
| Abs(v,_,t) ->
let set_vars_t = frees_rec set_var t in
Vars.union set_var (Vars.remove v set_vars_t)
| App(t1,t2) ->
Vars.union (frees_rec set_var t1) (frees_rec set_var t2)
| Forall(v,_,t) ->
let set_vars_t = frees_rec set_var t in
Vars.union set_var (Vars.remove v set_vars_t)
| Impl(t1,t2) -> Vars.union (frees_rec set_var t1) (frees_rec set_var t2)
| AbsTy(_,t) -> frees_rec set_var t
| Cst _ -> set_var
in
frees_rec Vars.empty t
let frees_ty ty =
let rec frees_ty_rec set_var = function
TyVar(s) -> Vars.add s set_var
| Arrow(tyl,tyr) ->
Vars.union (frees_ty_rec set_var tyl) (frees_ty_rec set_var tyr)
| TyOp(_,tys) ->
let list_var_tys = List.map (frees_ty_rec set_var) tys in
let set_vars_tys = List.fold_left (fun s1 -> fun s2 -> Vars.union s1 s2) Vars.empty list_var_tys in
set_vars_tys
| Prop -> Vars.empty
in
frees_ty_rec Vars.empty ty
let deep_alpha varlist t set_var =
let rename_list = List.map ( fun v - > ( v , gen_fresh_set set_var v ) ) varlist in
let rename v = List.assoc v rename_list in
let rec deep_alpha_rec = function
TeVar(v ) when List.mem v varlist - > TeVar(rename v )
| Abs(v , ty , t ) when List.mem v varlist - > Abs(rename v , ty , deep_alpha_rec t )
| Abs(v , ty , t ) - > Abs(v , ty , deep_alpha_rec t )
| App(t1,t2 ) - > App(deep_alpha_rec t1,deep_alpha_rec t2 )
| Forall(v , ty , t ) when List.mem v varlist - > Forall(rename v , ty , deep_alpha_rec t )
| Forall(v , ty , t ) when List.mem v varlist - > Forall(rename v , ty , deep_alpha_rec t )
| Impl(t1,t2 ) - > Impl(deep_alpha_rec t1,deep_alpha_rec t2 )
| AbsTy(_,t ) - > t
| t - > t in
deep_alpha_rec t
let resolve_spec_conflict t1 t2 =
if benign_spec(t1,t2 ) then t2
else
let frees_t1 = Vars.elements ( frees t1 ) in
let variables_t2 = variables t2 in
deep_alpha frees_t1 t2 ( Vars.elements variables_t2 )
let rename_list = List.map (fun v -> (v,gen_fresh_set set_var v)) varlist in
let rename v = List.assoc v rename_list in
let rec deep_alpha_rec = function
TeVar(v) when List.mem v varlist -> TeVar(rename v)
| Abs(v,ty,t) when List.mem v varlist -> Abs(rename v,ty,deep_alpha_rec t)
| Abs(v,ty,t) -> Abs(v,ty,deep_alpha_rec t)
| App(t1,t2) -> App(deep_alpha_rec t1,deep_alpha_rec t2)
| Forall(v,ty,t) when List.mem v varlist -> Forall(rename v,ty,deep_alpha_rec t)
| Forall(v,ty,t) when List.mem v varlist -> Forall(rename v,ty,deep_alpha_rec t)
| Impl(t1,t2) -> Impl(deep_alpha_rec t1,deep_alpha_rec t2)
| AbsTy(_,t) -> t
| t -> t in
deep_alpha_rec t
let resolve_spec_conflict t1 t2 =
if benign_spec(t1,t2) then t2
else
let frees_t1 = Vars.elements (frees t1) in
let variables_t2 = variables t2 in
deep_alpha frees_t1 t2 (Vars.elements variables_t2)
*) | null | https://raw.githubusercontent.com/Deducteam/Logipedia/09797a35ae36ab671e40e615fcdc09a7bba69134/src/sttfa/environ.ml | ocaml | k counts lambas, used for renaming | open Ast
open Kernel.Basic
module Basic = Kernel.Basic
module Term = Kernel.Term
let package = ref ""
let set_package file =
let path = Filename.dirname file in
let sep = Filename.dir_sep in
assert (String.length sep = 1);
let sep = String.get sep 0 in
match List.rev @@ String.split_on_char sep path with
| [] -> failwith "Files should be in a directory (which is interpreted as a package)"
| x::_ -> package := x
let renaming = ref true
Use a list rather than a sec for
type proof_ctx = (string * _te) list
type env =
{k: int; dk: Term.typed_context; ty: ty_ctx; te: te_ctx; prf: proof_ctx}
let empty_env = {k= 0; dk= []; ty= []; te= []; prf= []}
let soi = string_of_ident
let rec gen_fresh_rec ctx avoid x c =
let x' = if c < 0 then x else x ^ string_of_int c in
if List.exists (fun (_, v, _) -> soi v = x') ctx || List.mem x' avoid
then gen_fresh_rec ctx avoid x (c + 1)
else mk_ident x'
let gen_fresh env avoid x = gen_fresh_rec env.dk avoid (soi x) (-1)
let mk_ident = mk_ident
let string_of_ident = string_of_ident
let of_name name = (string_of_mident (md name), string_of_ident (id name))
let name_of cst = Basic.mk_name (Basic.mk_mident (fst cst)) (Basic.mk_ident (snd cst))
let add_ty_var env var =
let open Basic in
let open Sttfadk in
{ env with
k= env.k + 1
; ty= var :: env.ty
; dk=
(dloc, mk_ident var, Term.mk_Const dloc (mk_name sttfa_module sttfa_type))
:: env.dk }
let add_ty_var_dk env var =
add_ty_var env (soi var)
let add_te_var env var ty' =
let open Basic in
let ty = Decompile.decompile__type env.dk ty' in
let ty = Decompile.to__type ty in
{ env with
k = env.k + 1;
te= (var, ty') :: env.te; dk= (dloc, mk_ident var, ty) :: env.dk
}
let add_te_var_dk env var ty' = add_te_var env (soi var) ty'
let add_prf_ctx env id _te _te' =
{ env with
k= env.k + 1
; prf= (id, _te') :: env.prf
; dk= (Basic.dloc, mk_ident id, _te) :: env.dk }
let get_dk_var env n =
let _, x, _ = List.nth env.dk n in
soi x
let rec take i l =
if i = 0 then []
else match l with [] -> assert false | x :: l -> x :: take (i - 1) l
let rec drop i l =
if i = 0 then l
else match l with [] -> assert false | _ :: l -> drop (i - 1) l
module StringxType = struct
type t = string * _ty
let compare = Stdlib.compare
end
module TVars = Set . Make(StringxType )
module TVars = Set.Make(StringxType)
*)
module Vars = Set.Make(String)
let gen_fresh_set varlist v =
let rec gen_fresh_set_rec varlist v i =
let new_v = v^(string_of_int i) in
if List.mem new_v varlist then
gen_fresh_set_rec varlist v (i+1)
else new_v
in
gen_fresh_set_rec varlist v 0
let variables t =
let rec variables_rec set_var = function
TeVar(s) -> Vars.add s set_var
| Abs(v,_,t) -> variables_rec (Vars.add v set_var) t
| App(t1,t2) -> Vars.union (variables_rec set_var t1) (variables_rec set_var t2)
| Forall(v,_,t) -> variables_rec (Vars.add v set_var) t
| Impl(t1,t2) -> Vars.union (variables_rec set_var t1) (variables_rec set_var t2)
| AbsTy(_,t) -> variables_rec set_var t
| Cst _ -> set_var
in
variables_rec Vars.empty t
let benign_spec = function
| TeVar(v1),Forall(v2 , _ , _ ) when v1 = v2- > true
| _ - > false
let benign_spec = function
| TeVar(v1),Forall(v2,_,_) when v1=v2-> true
| _ -> false*)
let frees t =
let rec frees_rec set_var = function
TeVar(s) -> Vars.add s set_var
| Abs(v,_,t) ->
let set_vars_t = frees_rec set_var t in
Vars.union set_var (Vars.remove v set_vars_t)
| App(t1,t2) ->
Vars.union (frees_rec set_var t1) (frees_rec set_var t2)
| Forall(v,_,t) ->
let set_vars_t = frees_rec set_var t in
Vars.union set_var (Vars.remove v set_vars_t)
| Impl(t1,t2) -> Vars.union (frees_rec set_var t1) (frees_rec set_var t2)
| AbsTy(_,t) -> frees_rec set_var t
| Cst _ -> set_var
in
frees_rec Vars.empty t
let frees_ty ty =
let rec frees_ty_rec set_var = function
TyVar(s) -> Vars.add s set_var
| Arrow(tyl,tyr) ->
Vars.union (frees_ty_rec set_var tyl) (frees_ty_rec set_var tyr)
| TyOp(_,tys) ->
let list_var_tys = List.map (frees_ty_rec set_var) tys in
let set_vars_tys = List.fold_left (fun s1 -> fun s2 -> Vars.union s1 s2) Vars.empty list_var_tys in
set_vars_tys
| Prop -> Vars.empty
in
frees_ty_rec Vars.empty ty
let deep_alpha varlist t set_var =
let rename_list = List.map ( fun v - > ( v , gen_fresh_set set_var v ) ) varlist in
let rename v = List.assoc v rename_list in
let rec deep_alpha_rec = function
TeVar(v ) when List.mem v varlist - > TeVar(rename v )
| Abs(v , ty , t ) when List.mem v varlist - > Abs(rename v , ty , deep_alpha_rec t )
| Abs(v , ty , t ) - > Abs(v , ty , deep_alpha_rec t )
| App(t1,t2 ) - > App(deep_alpha_rec t1,deep_alpha_rec t2 )
| Forall(v , ty , t ) when List.mem v varlist - > Forall(rename v , ty , deep_alpha_rec t )
| Forall(v , ty , t ) when List.mem v varlist - > Forall(rename v , ty , deep_alpha_rec t )
| Impl(t1,t2 ) - > Impl(deep_alpha_rec t1,deep_alpha_rec t2 )
| AbsTy(_,t ) - > t
| t - > t in
deep_alpha_rec t
let resolve_spec_conflict t1 t2 =
if benign_spec(t1,t2 ) then t2
else
let frees_t1 = Vars.elements ( frees t1 ) in
let variables_t2 = variables t2 in
deep_alpha frees_t1 t2 ( Vars.elements variables_t2 )
let rename_list = List.map (fun v -> (v,gen_fresh_set set_var v)) varlist in
let rename v = List.assoc v rename_list in
let rec deep_alpha_rec = function
TeVar(v) when List.mem v varlist -> TeVar(rename v)
| Abs(v,ty,t) when List.mem v varlist -> Abs(rename v,ty,deep_alpha_rec t)
| Abs(v,ty,t) -> Abs(v,ty,deep_alpha_rec t)
| App(t1,t2) -> App(deep_alpha_rec t1,deep_alpha_rec t2)
| Forall(v,ty,t) when List.mem v varlist -> Forall(rename v,ty,deep_alpha_rec t)
| Forall(v,ty,t) when List.mem v varlist -> Forall(rename v,ty,deep_alpha_rec t)
| Impl(t1,t2) -> Impl(deep_alpha_rec t1,deep_alpha_rec t2)
| AbsTy(_,t) -> t
| t -> t in
deep_alpha_rec t
let resolve_spec_conflict t1 t2 =
if benign_spec(t1,t2) then t2
else
let frees_t1 = Vars.elements (frees t1) in
let variables_t2 = variables t2 in
deep_alpha frees_t1 t2 (Vars.elements variables_t2)
*) |
7e89acc4c5da8ad9d6d2771ceb2d1b83e2324767c23e1998ce3b8d8337eab4c8 | karlhof26/gimp-scheme | Landscape Painter.scm | ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Landscape Painter ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; A GIMP script-fu to generate a painted look to a
; landscape photo. Sometimes provides an interesting
; effect on portraits and animal shots.
;
; Creates a top layer set to Darken Only mode and
; then blurs it. Varying the blur radius will change
; the effect, as will applying a Levels or Curves adjustment
; to the Darken Only layer. Just play with it and see
; what you get!
;
Tested on 2.10.20
4/12/2006 and 11/9/2020
;
License :
; This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
; (at your option) any later version.
;
; This program is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; GNU General Public License for more details.
;
To view a copy of the GNU General Public License
; visit:
;
;
(define (script-fu-Land_paint img drawable blur-rad merge-flag )
(let* (
(darken-layer 0)
)
; Start an undo group. Everything between the start and the end
; will be carried out if an undo command is issued.
(gimp-image-undo-group-start img)
;; CREATE THE DARKEN ONLY LAYER ;;
; Create a new layer
(set! darken-layer (car (gimp-layer-copy drawable 0)))
; Give it a name
(gimp-drawable-set-name darken-layer "Darken Only layer")
; Add the new layer to the image
(gimp-image-insert-layer img darken-layer 0 0)
Set opacity to 100 %
(gimp-layer-set-opacity darken-layer 100)
(gimp-layer-set-mode darken-layer LAYER-MODE-DARKEN-ONLY)
;
;
; Blur the layer
(if (> blur-rad 0)
(plug-in-gauss-iir 1 img darken-layer blur-rad 1 1 )
()
)
;
;
; NOW MERGE EVERYTHING DOWN IF DESIRED
(if (equal? merge-flag TRUE)
(set! merged-layer (car(gimp-image-merge-down img darken-layer 1 )))
()
)
(if (equal? merge-flag TRUE)
(gimp-drawable-set-name merged-layer "Result of Landscape Painter")
()
)
; Complete the undo group
(gimp-image-undo-group-end img)
; Flush the display
(gimp-displays-flush)
)
)
(script-fu-register "script-fu-Land_paint"
"<Toolbox>/Script-Fu/Effects/Landscape Painter DarkenLayer"
"Add Darken Only layer and blur it \nfile:Landscape Painter.scm"
"Script by Mark Lowry"
"Technique by Mark Lowry"
"2006"
"RGB*, GRAY*"
SF-IMAGE "Image" 0
SF-DRAWABLE "Current Layer" 0
SF-VALUE "Blur radius?" "15"
SF-TOGGLE "Merge Layers?" FALSE
)
;end of script | null | https://raw.githubusercontent.com/karlhof26/gimp-scheme/2a1258bc3771287f83bd0ae94b6e7341a7bb4371/Landscape%20Painter.scm | scheme |
Landscape Painter ;;
A GIMP script-fu to generate a painted look to a
landscape photo. Sometimes provides an interesting
effect on portraits and animal shots.
Creates a top layer set to Darken Only mode and
then blurs it. Varying the blur radius will change
the effect, as will applying a Levels or Curves adjustment
to the Darken Only layer. Just play with it and see
what you get!
This program is free software: you can redistribute it and/or modify
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
visit:
Start an undo group. Everything between the start and the end
will be carried out if an undo command is issued.
CREATE THE DARKEN ONLY LAYER ;;
Create a new layer
Give it a name
Add the new layer to the image
Blur the layer
NOW MERGE EVERYTHING DOWN IF DESIRED
Complete the undo group
Flush the display
end of script | Tested on 2.10.20
4/12/2006 and 11/9/2020
License :
it under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
To view a copy of the GNU General Public License
(define (script-fu-Land_paint img drawable blur-rad merge-flag )
(let* (
(darken-layer 0)
)
(gimp-image-undo-group-start img)
(set! darken-layer (car (gimp-layer-copy drawable 0)))
(gimp-drawable-set-name darken-layer "Darken Only layer")
(gimp-image-insert-layer img darken-layer 0 0)
Set opacity to 100 %
(gimp-layer-set-opacity darken-layer 100)
(gimp-layer-set-mode darken-layer LAYER-MODE-DARKEN-ONLY)
(if (> blur-rad 0)
(plug-in-gauss-iir 1 img darken-layer blur-rad 1 1 )
()
)
(if (equal? merge-flag TRUE)
(set! merged-layer (car(gimp-image-merge-down img darken-layer 1 )))
()
)
(if (equal? merge-flag TRUE)
(gimp-drawable-set-name merged-layer "Result of Landscape Painter")
()
)
(gimp-image-undo-group-end img)
(gimp-displays-flush)
)
)
(script-fu-register "script-fu-Land_paint"
"<Toolbox>/Script-Fu/Effects/Landscape Painter DarkenLayer"
"Add Darken Only layer and blur it \nfile:Landscape Painter.scm"
"Script by Mark Lowry"
"Technique by Mark Lowry"
"2006"
"RGB*, GRAY*"
SF-IMAGE "Image" 0
SF-DRAWABLE "Current Layer" 0
SF-VALUE "Blur radius?" "15"
SF-TOGGLE "Merge Layers?" FALSE
)
|
2bc153a2a26c6949e4ed2e35b0d784670d316598404da04d3c01cbd53c93587d | arcfide/oleg | read-NCName-ss.scm | ;*****************************************************************
; read-NCName-ss
; A version of SSAX:read-NCName that uses next-token-of-as-symb
;
$ I d : read - NCName - ss.scm , v 1.4 2003/04/29 01:40:35 oleg Exp $
(directives
Adding substring->symbol implemented in substring - symbol.c
(extern
(macro substring->symbol::symbol (::bstring ::int ::int)
"substring_to_symbol")
(include "substring-symbol.c"))
(pragma
(substring->symbol no-cfa-top nesting))
)
; The same as next-token-of with the exception that it returns its result
; as a symbol
(define (next-token-of-as-symb incl-list/pred port)
(let* ((buffer (input-parse:init-buffer))
(curr-buf-len (string-length buffer)))
(if (procedure? incl-list/pred)
(let outer ((buffer buffer) (filled-buffer-l '()))
(let loop ((i 0))
(if (>= i curr-buf-len) ; make sure we have space
(outer (make-string curr-buf-len) (cons buffer filled-buffer-l))
(let ((c (incl-list/pred (peek-char port))))
(if c
(begin
(string-set! buffer i c)
(read-char port) ; move to the next char
(loop (++ i)))
; incl-list/pred decided it had had enough
(if (null? filled-buffer-l) (substring->symbol buffer 0 i)
(string->symbol
(string-concatenate-reverse filled-buffer-l buffer i))))))))
; incl-list/pred is a list of allowed characters
(let outer ((buffer buffer) (filled-buffer-l '()))
(let loop ((i 0))
(if (>= i curr-buf-len) ; make sure we have space
(outer (make-string curr-buf-len) (cons buffer filled-buffer-l))
(let ((c (peek-char port)))
(cond
((not (memv c incl-list/pred))
(if (null? filled-buffer-l) (substring->symbol buffer 0 i)
(string->symbol
(string-concatenate-reverse filled-buffer-l buffer i))))
(else
(string-set! buffer i c)
(read-char port) ; move to the next char
(loop (++ i))))))))
)))
; A version of SSAX:read-NCName that uses next-token-of-as-symb
; Read a NCName starting from the current position in the PORT and
; return it as a symbol.
(define (ssax:read-NCName port)
(let ((first-char (peek-char port)))
(or (ssax:ncname-starting-char? first-char)
(parser-error port "XMLNS [4] for '" first-char "'")))
(next-token-of-as-symb
(lambda (c)
(cond
((eof-object? c) #f)
((char-alphabetic? c) c)
((string-index "0123456789.-_" c) c)
(else #f)))
port))
| null | https://raw.githubusercontent.com/arcfide/oleg/c6826870436925fd4c873c01d7fcc24a7a7f95dc/ssax/benchmarks/read-NCName-ss.scm | scheme | *****************************************************************
read-NCName-ss
A version of SSAX:read-NCName that uses next-token-of-as-symb
The same as next-token-of with the exception that it returns its result
as a symbol
make sure we have space
move to the next char
incl-list/pred decided it had had enough
incl-list/pred is a list of allowed characters
make sure we have space
move to the next char
A version of SSAX:read-NCName that uses next-token-of-as-symb
Read a NCName starting from the current position in the PORT and
return it as a symbol. | $ I d : read - NCName - ss.scm , v 1.4 2003/04/29 01:40:35 oleg Exp $
(directives
Adding substring->symbol implemented in substring - symbol.c
(extern
(macro substring->symbol::symbol (::bstring ::int ::int)
"substring_to_symbol")
(include "substring-symbol.c"))
(pragma
(substring->symbol no-cfa-top nesting))
)
(define (next-token-of-as-symb incl-list/pred port)
(let* ((buffer (input-parse:init-buffer))
(curr-buf-len (string-length buffer)))
(if (procedure? incl-list/pred)
(let outer ((buffer buffer) (filled-buffer-l '()))
(let loop ((i 0))
(outer (make-string curr-buf-len) (cons buffer filled-buffer-l))
(let ((c (incl-list/pred (peek-char port))))
(if c
(begin
(string-set! buffer i c)
(loop (++ i)))
(if (null? filled-buffer-l) (substring->symbol buffer 0 i)
(string->symbol
(string-concatenate-reverse filled-buffer-l buffer i))))))))
(let outer ((buffer buffer) (filled-buffer-l '()))
(let loop ((i 0))
(outer (make-string curr-buf-len) (cons buffer filled-buffer-l))
(let ((c (peek-char port)))
(cond
((not (memv c incl-list/pred))
(if (null? filled-buffer-l) (substring->symbol buffer 0 i)
(string->symbol
(string-concatenate-reverse filled-buffer-l buffer i))))
(else
(string-set! buffer i c)
(loop (++ i))))))))
)))
(define (ssax:read-NCName port)
(let ((first-char (peek-char port)))
(or (ssax:ncname-starting-char? first-char)
(parser-error port "XMLNS [4] for '" first-char "'")))
(next-token-of-as-symb
(lambda (c)
(cond
((eof-object? c) #f)
((char-alphabetic? c) c)
((string-index "0123456789.-_" c) c)
(else #f)))
port))
|
6cce95b1a5e54427b0ba25f072fa2636a8f6d11971728705883f3927907075f7 | rd--/hsc3 | analogTape.help.hs | -- analogTape
let freq = control kr "freq" 110
width = control kr "width" 0.5
bias = control kr "bias" 0.5
saturation = control kr "saturation" 0.5
drive = control kr "drive" 0.5
sig = varSaw ar freq 0 width
in mce2 (sig * 0.05) (X.analogTape sig bias saturation drive 1 0 * 0.1)
-- analogTape ; event control
let bias = control kr "bias" 0.25
saturation = control kr "saturation" 0.25
drive = control kr "drive" 0.25
f (_,g,x,y,z,o,rx,ry,_,_,_) =
let freq = midiCps (x * 12 + 48)
width = y
sig = varSaw ar freq 0 width
in pan2 (X.analogTape sig (bias + rx) (saturation + ry) (drive + o) 1 0) (o * 2 - 1) (z * g)
in mix (voicer 12 f) * control kr "gain" 1
---- ; drawings
UI.ui_sc3_scope 2 0 (2 ^ 14) 0.75 "audio" 0
| null | https://raw.githubusercontent.com/rd--/hsc3/e1c9377f4985f4cc209a9cb11421bc8f1686c6f2/Help/Ugen/analogTape.help.hs | haskell | analogTape
analogTape ; event control
-- ; drawings | let freq = control kr "freq" 110
width = control kr "width" 0.5
bias = control kr "bias" 0.5
saturation = control kr "saturation" 0.5
drive = control kr "drive" 0.5
sig = varSaw ar freq 0 width
in mce2 (sig * 0.05) (X.analogTape sig bias saturation drive 1 0 * 0.1)
let bias = control kr "bias" 0.25
saturation = control kr "saturation" 0.25
drive = control kr "drive" 0.25
f (_,g,x,y,z,o,rx,ry,_,_,_) =
let freq = midiCps (x * 12 + 48)
width = y
sig = varSaw ar freq 0 width
in pan2 (X.analogTape sig (bias + rx) (saturation + ry) (drive + o) 1 0) (o * 2 - 1) (z * g)
in mix (voicer 12 f) * control kr "gain" 1
UI.ui_sc3_scope 2 0 (2 ^ 14) 0.75 "audio" 0
|
6b95595deeaea4956dcf0049c7f796762ef5d4584fe626df82572474716924b2 | macourtney/Conjure | test_view_test_generator.clj | (ns conjure.script.generators.test-view-test-generator
(:use clojure.test
conjure.script.generators.view-test-generator)
(:require [conjure.view.util :as view-util]))
(deftest test-generate-standard-content
(is (generate-test-content "test" "show"))) | null | https://raw.githubusercontent.com/macourtney/Conjure/1d6cb22d321ea75af3a6abe2a5bc140ad36e20d1/conjure_script_view/test/conjure/script/generators/test_view_test_generator.clj | clojure | (ns conjure.script.generators.test-view-test-generator
(:use clojure.test
conjure.script.generators.view-test-generator)
(:require [conjure.view.util :as view-util]))
(deftest test-generate-standard-content
(is (generate-test-content "test" "show"))) | |
5351ed44d26165a713ddf8c43ce46f084c3ccd171f8c9f734b8d298e9ba0194f | johnwhitington/camlpdf | pdfread.mli | (** Reading PDF Files *)
(** Read a PDF from a [Pdfio.input], with an optional user password which, if
absent, is assumed to be the empty string, and optional owner password. *)
val pdf_of_input : ?revision:int -> string option -> string option -> Pdfio.input -> Pdf.t
(** Same as [pdf_of_input], but delay loading of streams and parsing of objects
(they will be loaded and parsed when needed). Useful if we only intend to do
something simple, like read metadata. *)
val pdf_of_input_lazy : ?revision:int -> string option -> string option -> Pdfio.input -> Pdf.t
(** Same as [pdf_of_input], but from an OCaml channel. *)
val pdf_of_channel : ?revision:int -> ?source:string -> string option -> string option -> in_channel -> Pdf.t
(** As [pdf_of_channel], but delay loading of streams and parsing of objects like [pdf_of_input_lazy]. *)
val pdf_of_channel_lazy : ?revision:int -> ?source:string -> string option -> string option -> in_channel -> Pdf.t
(** Read a PDF from the given filename with optional user and owner passwords. *)
val pdf_of_file : ?revision:int -> string option -> string option -> string -> Pdf.t
* { 2 Configuration and debugging }
(** If set, some debug output is produced. *)
val read_debug : bool ref
(** If set, a malformed PDF will cause an error, not an attempt to read using
the malformed PDF reader. *)
val error_on_malformed : bool ref
(** If set, we always use the malformed PDF reader. For debug. *)
val debug_always_treat_malformed : bool ref
* { 2 Low level functions }
* Read the number of revisions of the document , by performing a dummy read . For
example , if this function returns 3 , then appropriate values to pass to
[ ? revision ] in a subsequent call to pdf_of_input are 1 , 2 , and 3 .
example, if this function returns 3, then appropriate values to pass to
[?revision] in a subsequent call to pdf_of_input are 1, 2, and 3. *)
val revisions : Pdfio.input -> int
(** Return encryption method in use *)
val what_encryption : Pdf.t -> Pdfwrite.encryption_method option
(** Return list of permissions *)
val permissions : Pdf.t -> Pdfcrypt.permission list
(** Given a filename, see if the file is linearized. *)
val is_linearized : Pdfio.input -> bool
(** Read a PDF header *)
val read_header : Pdfio.input -> int * int
(* Read characters until a PDF delimiter. *)
val getuntil_white_or_delimiter : Pdfio.input -> char list
(* Read characters until a PDF delimiter, returned as a string. *)
val getuntil_white_or_delimiter_string : Pdfio.input -> string
(** Read characters until a predicate is true. If the boolean is set, end of
input is considered a delimiter. *)
val getuntil : bool -> (char -> bool) -> Pdfio.input -> char list
(** Throw away characters until a predicate is true. If the boolean is set, end
of input is considered a delimiter. *)
val ignoreuntil : bool -> (char -> bool) -> Pdfio.input -> unit
(** Drop whitespace characters from an input. *)
val dropwhite : Pdfio.input -> unit
* a name , assuming there is one to lex .
val lex_name : Pdfio.input -> Pdfgenlex.t
(** Lex a number, assuming there is one to lex. *)
val lex_number : Pdfio.input -> Pdfgenlex.t
* a string , assuming there is one to lex .
val lex_string : Pdfio.input -> Pdfgenlex.t
(** Lex a hexadecimal string, assuming there is one to lex. *)
val lex_hexstring : Pdfio.input -> Pdfgenlex.t
* a comment , assuming there is one to lex .
val lex_comment : Pdfio.input -> Pdfgenlex.t
* a dictinonary , assuming there is one to lex .
val lex_dictionary : Pdfio.input -> Pdfgenlex.t list
(** Lex stream data of the given length. If the boolean is true, then actually
read the data. If not, merely record the intention to. *)
val lex_stream_data : Pdfio.input -> int -> bool -> Pdfgenlex.t
* a PDF object . If [ failure_is_ok ] is set , a null object with high
object number is returned , instead of an exception being raised .
object number is returned, instead of an exception being raised. *)
val parse : ?failure_is_ok:bool -> Pdfgenlex.t list -> int * Pdf.pdfobject
* a single object .
val parse_single_object : string -> Pdf.pdfobject
* String representation of a lexeme
val string_of_lexeme : Pdfgenlex.t -> string
(** Print a lexeme to Standard Output with a space after it, for debug. *)
val print_lexeme : Pdfgenlex.t -> unit
(** / **)
val endpage : (Pdf.t -> int) ref
| null | https://raw.githubusercontent.com/johnwhitington/camlpdf/ebc3061176d7218c9aac113a0bbbb6289836154b/pdfread.mli | ocaml | * Reading PDF Files
* Read a PDF from a [Pdfio.input], with an optional user password which, if
absent, is assumed to be the empty string, and optional owner password.
* Same as [pdf_of_input], but delay loading of streams and parsing of objects
(they will be loaded and parsed when needed). Useful if we only intend to do
something simple, like read metadata.
* Same as [pdf_of_input], but from an OCaml channel.
* As [pdf_of_channel], but delay loading of streams and parsing of objects like [pdf_of_input_lazy].
* Read a PDF from the given filename with optional user and owner passwords.
* If set, some debug output is produced.
* If set, a malformed PDF will cause an error, not an attempt to read using
the malformed PDF reader.
* If set, we always use the malformed PDF reader. For debug.
* Return encryption method in use
* Return list of permissions
* Given a filename, see if the file is linearized.
* Read a PDF header
Read characters until a PDF delimiter.
Read characters until a PDF delimiter, returned as a string.
* Read characters until a predicate is true. If the boolean is set, end of
input is considered a delimiter.
* Throw away characters until a predicate is true. If the boolean is set, end
of input is considered a delimiter.
* Drop whitespace characters from an input.
* Lex a number, assuming there is one to lex.
* Lex a hexadecimal string, assuming there is one to lex.
* Lex stream data of the given length. If the boolean is true, then actually
read the data. If not, merely record the intention to.
* Print a lexeme to Standard Output with a space after it, for debug.
* / * |
val pdf_of_input : ?revision:int -> string option -> string option -> Pdfio.input -> Pdf.t
val pdf_of_input_lazy : ?revision:int -> string option -> string option -> Pdfio.input -> Pdf.t
val pdf_of_channel : ?revision:int -> ?source:string -> string option -> string option -> in_channel -> Pdf.t
val pdf_of_channel_lazy : ?revision:int -> ?source:string -> string option -> string option -> in_channel -> Pdf.t
val pdf_of_file : ?revision:int -> string option -> string option -> string -> Pdf.t
* { 2 Configuration and debugging }
val read_debug : bool ref
val error_on_malformed : bool ref
val debug_always_treat_malformed : bool ref
* { 2 Low level functions }
* Read the number of revisions of the document , by performing a dummy read . For
example , if this function returns 3 , then appropriate values to pass to
[ ? revision ] in a subsequent call to pdf_of_input are 1 , 2 , and 3 .
example, if this function returns 3, then appropriate values to pass to
[?revision] in a subsequent call to pdf_of_input are 1, 2, and 3. *)
val revisions : Pdfio.input -> int
val what_encryption : Pdf.t -> Pdfwrite.encryption_method option
val permissions : Pdf.t -> Pdfcrypt.permission list
val is_linearized : Pdfio.input -> bool
val read_header : Pdfio.input -> int * int
val getuntil_white_or_delimiter : Pdfio.input -> char list
val getuntil_white_or_delimiter_string : Pdfio.input -> string
val getuntil : bool -> (char -> bool) -> Pdfio.input -> char list
val ignoreuntil : bool -> (char -> bool) -> Pdfio.input -> unit
val dropwhite : Pdfio.input -> unit
* a name , assuming there is one to lex .
val lex_name : Pdfio.input -> Pdfgenlex.t
val lex_number : Pdfio.input -> Pdfgenlex.t
* a string , assuming there is one to lex .
val lex_string : Pdfio.input -> Pdfgenlex.t
val lex_hexstring : Pdfio.input -> Pdfgenlex.t
* a comment , assuming there is one to lex .
val lex_comment : Pdfio.input -> Pdfgenlex.t
* a dictinonary , assuming there is one to lex .
val lex_dictionary : Pdfio.input -> Pdfgenlex.t list
val lex_stream_data : Pdfio.input -> int -> bool -> Pdfgenlex.t
* a PDF object . If [ failure_is_ok ] is set , a null object with high
object number is returned , instead of an exception being raised .
object number is returned, instead of an exception being raised. *)
val parse : ?failure_is_ok:bool -> Pdfgenlex.t list -> int * Pdf.pdfobject
* a single object .
val parse_single_object : string -> Pdf.pdfobject
* String representation of a lexeme
val string_of_lexeme : Pdfgenlex.t -> string
val print_lexeme : Pdfgenlex.t -> unit
val endpage : (Pdf.t -> int) ref
|
ce0c1aa52fc37692632050e25077ab4fc4b47f7e942d62dabe0af4601dba2c5b | vmchale/dickinson | Useless.hs | {-# LANGUAGE OverloadedStrings #-}
| This module is loosely based off /Warnings for pattern matching/ by
Maranget
module Language.Dickinson.Pattern.Useless ( PatternM
, PatternEnv
, runPatternM
, isExhaustive
, patternEnvDecls
, useful
-- * Exported for testing
, specializeTuple
, specializeTag
) where
import Control.Monad (forM_)
import Control.Monad.State.Strict (State, execState)
import Data.Coerce (coerce)
import Data.Foldable (toList, traverse_)
import Data.Functor (void)
import Data.IntMap.Strict (findWithDefault)
import qualified Data.IntMap.Strict as IM
import qualified Data.IntSet as IS
import Language.Dickinson.Name
import Language.Dickinson.Type
import Language.Dickinson.Unique
import Lens.Micro (Lens')
import Lens.Micro.Mtl (modifying)
-- all constructors of a
data PatternEnv = PatternEnv { allCons :: IM.IntMap IS.IntSet -- ^ all constructors indexed by type
, types :: IM.IntMap Int -- ^ all types indexed by constructor
}
allConsLens :: Lens' PatternEnv (IM.IntMap IS.IntSet)
allConsLens f s = fmap (\x -> s { allCons = x }) (f (allCons s))
typesLens :: Lens' PatternEnv (IM.IntMap Int)
typesLens f s = fmap (\x -> s { types = x }) (f (types s))
declAdd :: Declaration a -> PatternM ()
declAdd Define{} = pure ()
declAdd (TyDecl _ (Name _ (Unique i) _) cs) = do
forM_ cs $ \(Name _ (Unique j) _) ->
modifying typesLens (IM.insert j i)
let cons = IS.fromList $ toList (unUnique . unique <$> cs)
modifying allConsLens (IM.insert i cons)
patternEnvDecls :: [Declaration a] -> PatternM ()
patternEnvDecls = traverse_ declAdd
-- TODO: just reader monad... writer at beginning?
type PatternM = State PatternEnv
runPatternM :: PatternM a -> PatternEnv
runPatternM = flip execState (PatternEnv mempty mempty)
given a constructor name , get the of all constructors of that type
assocUniques :: PatternEnv -> Name a -> IS.IntSet
assocUniques env (Name _ (Unique i) _) = {-# SCC "assocUniques" #-}
let ty = findWithDefault internalError i (types env)
in findWithDefault internalError ty (allCons env)
internalError :: a
internalError = error "Internal error: lookup in a PatternEnv failed"
isExhaustive :: PatternEnv -> [Pattern a] -> Bool
isExhaustive env ps = {-# SCC "isExhaustive" #-} not $ useful env ps (Wildcard undefined)
isCompleteSet :: PatternEnv -> [Name a] -> Maybe [Name ()]
isCompleteSet _ [] = Nothing
isCompleteSet env ns@(n:_) =
let allU = assocUniques env n
ty = coerce (unique <$> ns)
in if IS.null (allU IS.\\ IS.fromList ty)
then Just ((\u -> Name undefined (Unique u) ()) <$> IS.toList allU)
else Nothing
useful :: PatternEnv -> [Pattern a] -> Pattern a -> Bool
useful env ps p = usefulMaranget env [[p'] | p' <- ps] [p]
sanityFailed :: a
sanityFailed = error "Sanity check failed! Perhaps you ran the pattern match exhaustiveness checker on an ill-typed program?"
specializeTag :: Name a -> [[Pattern a]] -> [[Pattern a]]
specializeTag c = concatMap withRow
where withRow (PatternCons _ c':ps) | c' == c = [ps]
| otherwise = []
withRow (PatternTuple{}:_) = sanityFailed
withRow (Wildcard{}:ps) = [ps]
withRow (PatternVar{}:ps) = [ps]
withRow (OrPattern _ rs:ps) = specializeTag c [r:ps | r <- toList rs] -- TODO: unit test case for this
withRow [] = emptySpecialize
specializeTuple :: Int -> [[Pattern a]] -> [[Pattern a]]
specializeTuple n = concatMap withRow
where withRow (PatternTuple _ ps:ps') = [toList ps ++ ps']
withRow (p@Wildcard{}:ps') = [replicate n p ++ ps']
withRow (p@PatternVar{}:ps') = [replicate n p ++ ps']
withRow (OrPattern _ rs:ps) = specializeTuple n [r:ps | r <- toList rs]
withRow (PatternCons{}:_) = sanityFailed
withRow [] = emptySpecialize
emptySpecialize :: a
emptySpecialize = error "Internal error: tried to take specialized matrix of an empty row"
| \\ ( ) \\ ) in the Maranget paper
defaultMatrix :: [[Pattern a]] -> [[Pattern a]]
defaultMatrix = concatMap withRow where
withRow [] = error "Internal error: tried to take default matrix of an empty row"
withRow (PatternTuple{}:_) = error "Sanity check failed!" -- because a tuple would be complete by itself
withRow (PatternCons{}:_) = []
withRow (Wildcard{}:ps) = [ps]
withRow (PatternVar{}:ps) = [ps]
withRow (OrPattern _ rs:ps) = defaultMatrix [r:ps | r <- toList rs]
data Complete a = NotComplete
| CompleteTuple Int
| CompleteTags [Name a]
extrCons :: Pattern a -> [Name a]
extrCons (PatternCons _ c) = [c]
extrCons (OrPattern _ ps) = concatMap extrCons (toList ps)
extrCons _ = []
Is the first column of the pattern matrix complete ?
fstComplete :: PatternEnv -> [[Pattern a]] -> Complete ()
# SCC " " #
if maxTupleLength > 0
then CompleteTuple maxTupleLength
else maybe NotComplete CompleteTags
$ isCompleteSet env (concatMap extrCons fstColumn)
where fstColumn = fmap head ps
tuple (PatternTuple _ ps') = length ps'
tuple (OrPattern _ ps') = maximum (tuple <$> ps')
tuple _ = 0
maxTupleLength = maximum (tuple <$> fstColumn)
follows maranget paper
usefulMaranget :: PatternEnv -> [[Pattern a]] -> [Pattern a] -> Bool
usefulMaranget _ [] _ = True
usefulMaranget _ _ [] = False
usefulMaranget env ps (PatternCons _ c:qs) = usefulMaranget env (specializeTag c ps) qs
usefulMaranget env ps (PatternTuple _ ps':qs) = usefulMaranget env (specializeTuple (length ps') ps) (toList ps' ++ qs)
usefulMaranget env ps (OrPattern _ ps':qs) = any (\p -> usefulMaranget env ps (p:qs)) ps'
usefulMaranget env ps (q:qs) = -- var or wildcard
let cont = fstComplete env ps in
case cont of
NotComplete -> usefulMaranget env (defaultMatrix ps) qs
CompleteTuple n -> usefulMaranget env (specializeTuple n ps) (specializeTupleVector n q qs)
CompleteTags ns -> or $ fmap (\n -> usefulMaranget env (specializeTag n (forget ps)) (fmap void qs)) ns
specializeTupleVector :: Int -> Pattern a -> [Pattern a] -> [Pattern a]
# SCC " specializeTupleVector " #
forget :: [[Pattern a]] -> [[Pattern ()]]
forget = fmap (fmap void)
| null | https://raw.githubusercontent.com/vmchale/dickinson/5b7d9d48bdb2aa902cddc0161dca3a6e7f3e0aea/src/Language/Dickinson/Pattern/Useless.hs | haskell | # LANGUAGE OverloadedStrings #
* Exported for testing
all constructors of a
^ all constructors indexed by type
^ all types indexed by constructor
TODO: just reader monad... writer at beginning?
# SCC "assocUniques" #
# SCC "isExhaustive" #
TODO: unit test case for this
because a tuple would be complete by itself
var or wildcard |
| This module is loosely based off /Warnings for pattern matching/ by
Maranget
module Language.Dickinson.Pattern.Useless ( PatternM
, PatternEnv
, runPatternM
, isExhaustive
, patternEnvDecls
, useful
, specializeTuple
, specializeTag
) where
import Control.Monad (forM_)
import Control.Monad.State.Strict (State, execState)
import Data.Coerce (coerce)
import Data.Foldable (toList, traverse_)
import Data.Functor (void)
import Data.IntMap.Strict (findWithDefault)
import qualified Data.IntMap.Strict as IM
import qualified Data.IntSet as IS
import Language.Dickinson.Name
import Language.Dickinson.Type
import Language.Dickinson.Unique
import Lens.Micro (Lens')
import Lens.Micro.Mtl (modifying)
}
allConsLens :: Lens' PatternEnv (IM.IntMap IS.IntSet)
allConsLens f s = fmap (\x -> s { allCons = x }) (f (allCons s))
typesLens :: Lens' PatternEnv (IM.IntMap Int)
typesLens f s = fmap (\x -> s { types = x }) (f (types s))
declAdd :: Declaration a -> PatternM ()
declAdd Define{} = pure ()
declAdd (TyDecl _ (Name _ (Unique i) _) cs) = do
forM_ cs $ \(Name _ (Unique j) _) ->
modifying typesLens (IM.insert j i)
let cons = IS.fromList $ toList (unUnique . unique <$> cs)
modifying allConsLens (IM.insert i cons)
patternEnvDecls :: [Declaration a] -> PatternM ()
patternEnvDecls = traverse_ declAdd
type PatternM = State PatternEnv
runPatternM :: PatternM a -> PatternEnv
runPatternM = flip execState (PatternEnv mempty mempty)
given a constructor name , get the of all constructors of that type
assocUniques :: PatternEnv -> Name a -> IS.IntSet
let ty = findWithDefault internalError i (types env)
in findWithDefault internalError ty (allCons env)
internalError :: a
internalError = error "Internal error: lookup in a PatternEnv failed"
isExhaustive :: PatternEnv -> [Pattern a] -> Bool
isCompleteSet :: PatternEnv -> [Name a] -> Maybe [Name ()]
isCompleteSet _ [] = Nothing
isCompleteSet env ns@(n:_) =
let allU = assocUniques env n
ty = coerce (unique <$> ns)
in if IS.null (allU IS.\\ IS.fromList ty)
then Just ((\u -> Name undefined (Unique u) ()) <$> IS.toList allU)
else Nothing
useful :: PatternEnv -> [Pattern a] -> Pattern a -> Bool
useful env ps p = usefulMaranget env [[p'] | p' <- ps] [p]
sanityFailed :: a
sanityFailed = error "Sanity check failed! Perhaps you ran the pattern match exhaustiveness checker on an ill-typed program?"
specializeTag :: Name a -> [[Pattern a]] -> [[Pattern a]]
specializeTag c = concatMap withRow
where withRow (PatternCons _ c':ps) | c' == c = [ps]
| otherwise = []
withRow (PatternTuple{}:_) = sanityFailed
withRow (Wildcard{}:ps) = [ps]
withRow (PatternVar{}:ps) = [ps]
withRow [] = emptySpecialize
specializeTuple :: Int -> [[Pattern a]] -> [[Pattern a]]
specializeTuple n = concatMap withRow
where withRow (PatternTuple _ ps:ps') = [toList ps ++ ps']
withRow (p@Wildcard{}:ps') = [replicate n p ++ ps']
withRow (p@PatternVar{}:ps') = [replicate n p ++ ps']
withRow (OrPattern _ rs:ps) = specializeTuple n [r:ps | r <- toList rs]
withRow (PatternCons{}:_) = sanityFailed
withRow [] = emptySpecialize
emptySpecialize :: a
emptySpecialize = error "Internal error: tried to take specialized matrix of an empty row"
| \\ ( ) \\ ) in the Maranget paper
defaultMatrix :: [[Pattern a]] -> [[Pattern a]]
defaultMatrix = concatMap withRow where
withRow [] = error "Internal error: tried to take default matrix of an empty row"
withRow (PatternCons{}:_) = []
withRow (Wildcard{}:ps) = [ps]
withRow (PatternVar{}:ps) = [ps]
withRow (OrPattern _ rs:ps) = defaultMatrix [r:ps | r <- toList rs]
data Complete a = NotComplete
| CompleteTuple Int
| CompleteTags [Name a]
extrCons :: Pattern a -> [Name a]
extrCons (PatternCons _ c) = [c]
extrCons (OrPattern _ ps) = concatMap extrCons (toList ps)
extrCons _ = []
Is the first column of the pattern matrix complete ?
fstComplete :: PatternEnv -> [[Pattern a]] -> Complete ()
# SCC " " #
if maxTupleLength > 0
then CompleteTuple maxTupleLength
else maybe NotComplete CompleteTags
$ isCompleteSet env (concatMap extrCons fstColumn)
where fstColumn = fmap head ps
tuple (PatternTuple _ ps') = length ps'
tuple (OrPattern _ ps') = maximum (tuple <$> ps')
tuple _ = 0
maxTupleLength = maximum (tuple <$> fstColumn)
follows maranget paper
usefulMaranget :: PatternEnv -> [[Pattern a]] -> [Pattern a] -> Bool
usefulMaranget _ [] _ = True
usefulMaranget _ _ [] = False
usefulMaranget env ps (PatternCons _ c:qs) = usefulMaranget env (specializeTag c ps) qs
usefulMaranget env ps (PatternTuple _ ps':qs) = usefulMaranget env (specializeTuple (length ps') ps) (toList ps' ++ qs)
usefulMaranget env ps (OrPattern _ ps':qs) = any (\p -> usefulMaranget env ps (p:qs)) ps'
let cont = fstComplete env ps in
case cont of
NotComplete -> usefulMaranget env (defaultMatrix ps) qs
CompleteTuple n -> usefulMaranget env (specializeTuple n ps) (specializeTupleVector n q qs)
CompleteTags ns -> or $ fmap (\n -> usefulMaranget env (specializeTag n (forget ps)) (fmap void qs)) ns
specializeTupleVector :: Int -> Pattern a -> [Pattern a] -> [Pattern a]
# SCC " specializeTupleVector " #
-- | Erase the annotations from every pattern in the matrix.
forget :: [[Pattern a]] -> [[Pattern ()]]
forget = map (map void)
|
5656dfafa5f8f50f7e49ce0aa332483a1f2fda8638155ead232d1451f55246bf | 2600hz/kazoo | kazoo_globals_init.erl | %%%-----------------------------------------------------------------------------
%%% @copyright (C) 2010-2020, 2600Hz
%%% @doc Wait for Globals
%%% @author
%%% This Source Code Form is subject to the terms of the Mozilla Public
%%% License, v. 2.0. If a copy of the MPL was not distributed with this
%%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%%
%%% @end
%%%-----------------------------------------------------------------------------
-module(kazoo_globals_init).
-export([start_link/0]).
-include("kazoo_globals.hrl").
%% @doc Supervisor entry point.  Blocks (polling) until `kz_globals'
%% reports itself ready, then returns 'ignore' so the supervisor
%% continues without starting a child process.
-spec start_link() -> kz_types:startlink_ret().
start_link() ->
    wait_for_globals('false').
%% Poll kz_globals once per second until it reports readiness.
wait_for_globals('false') ->
    %% Not ready yet: back off for a second before asking again.
    timer:sleep(?MILLISECONDS_IN_SECOND),
    wait_for_globals(kz_globals:is_ready());
wait_for_globals('true') ->
    lager:info("kazoo globals is ready"),
    'ignore'.
| null | https://raw.githubusercontent.com/2600hz/kazoo/24519b9af9792caa67f7c09bbb9d27e2418f7ad6/core/kazoo_globals/src/kazoo_globals_init.erl | erlang | -----------------------------------------------------------------------------
@doc Wait for Globals
@end
----------------------------------------------------------------------------- | ( C ) 2010 - 2020 , 2600Hz
@author
This Source Code Form is subject to the terms of the Mozilla Public
License , v. 2.0 . If a copy of the MPL was not distributed with this
file , You can obtain one at /.
-module(kazoo_globals_init).
-export([start_link/0]).
-include("kazoo_globals.hrl").
-spec start_link() -> kz_types:startlink_ret().
start_link() ->
wait_for_globals('false').
wait_for_globals('true') ->
lager:info("kazoo globals is ready"),
'ignore';
wait_for_globals('false') ->
timer:sleep(?MILLISECONDS_IN_SECOND),
wait_for_globals(kz_globals:is_ready()).
|
fedefa77e591c041ec8eb32503ad9720723a9a138cf23c54a8750a5108d491b5 | ghollisjr/cl-ana | package.lisp | cl - ana is a Common Lisp data analysis library .
Copyright 2013 - 2015
;;;;
This file is part of cl - ana .
;;;;
;;;; cl-ana is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
;;;; (at your option) any later version.
;;;;
;;;; cl-ana is distributed in the hope that it will be useful, but
;;;; WITHOUT ANY WARRANTY; without even the implied warranty of
;;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;;;; General Public License for more details.
;;;;
You should have received a copy of the GNU General Public License
;;;; along with cl-ana. If not, see </>.
;;;;
You may contact ( me ! ) via email at
;;;;
;;;; Package definition for cl-ana.makeres: the make-like, dependency
;;;; driven result-computation framework of cl-ana (targets, projects,
;;;; the transformation pipeline, plus logging and caching utilities).
(defpackage #:cl-ana.makeres
  (:use :cl
        :external-program
        :cl-ana.memoization
        :cl-ana.error-propogation
        :cl-ana.hdf-utils
        :cl-ana.macro-utils
        :cl-ana.list-utils
        :cl-ana.symbol-utils
        :cl-ana.map
        :cl-ana.functional-utils
        :cl-ana.file-utils
        :cl-ana.string-utils
        :cl-ana.serialization
        :cl-ana.histogram
        :cl-ana.pathname-utils
        :cl-ana.table
        :cl-ana.reusable-table
        :cl-ana.hash-table-utils
        :cl-ana.plotting)
  (:export
   ;; target structure and accessors:
   :target
   :target-id
   :target-expr
   :target-deps
   :target-pdeps
   :target-val
   :target-stat
   :target-timestamp
   :make-target
   :copy-target
   ;; propogation:
   :transforms-propagate
   :res-dependencies
   :res-dependents
   :makeres-set-auto-propagate
   :makeres-propagate!
   :makeres-set-sticky-pars
   ;; hash tables (these are for debugging only):
   :*symbol-tables
   :*target-tables*
   :*fin-target-tables*
   :*project-id*
   :*transformation-table*
   :*params-table*
   :*args-tables*
   :*makeres-args*
   ;; noisy messages:
   :*makeres-warnings*
   ;; functions accessing hash tables:
   :project
   :project-parameters
   :project-targets
   :symbol-table
   :target-table
   :copy-target-table
   ;; dependency sorting:
   :depmap
   :dep<
   :depsort
   :depsort-graph
   :topological-sort
   :target-table-edge-map
   :decompress-edge-map
   :compress-edge-map
   :invert-edge-map
   ;; target and parameter macros:
   :res
   :resfn
   :par
   :parfn
   :mres
   :with-mres
   :remakeres
   ;; project macros:
   :defproject
   :in-project
   :defpars
   :undefpars
   :defres
   :defres-uniq
   :undefres
   :setresfn
   :setres
   :unsetresfn
   :unsetres
   :unsetdeps
   :unsetdepsfn
   :clrres
   :clrresfn
   :settrans ; set transformation pipeline
   :transform-target-table ; transforms table via pipeline
   :makeres-form ; returns the lambda form to perform the computation
   :makeres ; compile and call result generator
   :makeres-naive ; makeres without any transformation pipeline
   ;; project utilities:
   :target-ids
   :fin-target-ids
   ;; INCLUDED TRANSFORMATIONS:
   ;; Logical targets:
   :lrestrans
   :lres
   ;; Transformation utilities:
   :*trans->propogator-fn*
   :*copy-target-table-p*
   :defpropogator
   ;; logres:
   :save-target
   :load-target
   :load-target-manual
   :unload-target
   :commit-form
   :define-save-target-method
   :define-load-target-method
   :load-object
   :save-object
   :cleanup
   :project-path
   :set-project-path
   :save-project
   :load-project
   :checkout-version
   :logres-ignore
   :logres-ignorefn
   :logres-ignore-by
   :logres-track
   :logres-trackfn
   :logres-track-by
   :function-target?
   :printable
   :current-path
   :target-path
   :work-path
   ;; Snapshot Control:
   :save-snapshot
   :load-snapshot
   ;;; Caching:
   :defcache
   :init-logged-stats
   ;; Strategies:
   :open-cache
   :singleton-cache
   :fixed-cache
   ;; Utilities:
   :checkres
   :pruneres
   :purgeres
   :printres
   :mvres
   :evres
   :evresfn))

;; Make the generic math operators of cl-ana.gmath usable from this
;; package.
(cl-ana.gmath:use-gmath :cl-ana.makeres)
| null | https://raw.githubusercontent.com/ghollisjr/cl-ana/5cb4c0b0c9c4957452ad2a769d6ff9e8d5df0b10/makeres/package.lisp | lisp |
cl-ana is free software: you can redistribute it and/or modify it
(at your option) any later version.
cl-ana is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
along with cl-ana. If not, see </>.
target
propogation:
hash tables (these are for debugging only
noisy messages:
functions accessing hash tables
dependency sorting:
target and parameter macros
project macros
set transformation pipeline
transforms table via pipeline
returns the lambda form to perform the computation
compile and call result generator
makeres without any transformation pipeline
project utilities
INCLUDED TRANSFORMATIONS:
Logical targets:
Transformation utilities:
logres:
Caching:
Strategies: | cl - ana is a Common Lisp data analysis library .
Copyright 2013 - 2015
This file is part of cl - ana .
under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
You should have received a copy of the GNU General Public License
You may contact ( me ! ) via email at
(defpackage #:cl-ana.makeres
(:use :cl
:external-program
:cl-ana.memoization
:cl-ana.error-propogation
:cl-ana.hdf-utils
:cl-ana.macro-utils
:cl-ana.list-utils
:cl-ana.symbol-utils
:cl-ana.map
:cl-ana.functional-utils
:cl-ana.file-utils
:cl-ana.string-utils
:cl-ana.serialization
:cl-ana.histogram
:cl-ana.pathname-utils
:cl-ana.table
:cl-ana.reusable-table
:cl-ana.hash-table-utils
:cl-ana.plotting)
(:export
:target
:target-id
:target-expr
:target-deps
:target-pdeps
:target-val
:target-stat
:target-timestamp
:make-target
:copy-target
:transforms-propagate
:res-dependencies
:res-dependents
:makeres-set-auto-propagate
:makeres-propagate!
:makeres-set-sticky-pars
:*symbol-tables
:*target-tables*
:*fin-target-tables*
:*project-id*
:*transformation-table*
:*params-table*
:*args-tables*
:*makeres-args*
:*makeres-warnings*
:project
:project-parameters
:project-targets
:symbol-table
:target-table
:copy-target-table
:depmap
:dep<
:depsort
:depsort-graph
:topological-sort
:target-table-edge-map
:decompress-edge-map
:compress-edge-map
:invert-edge-map
:res
:resfn
:par
:parfn
:mres
:with-mres
:remakeres
:defproject
:in-project
:defpars
:undefpars
:defres
:defres-uniq
:undefres
:setresfn
:setres
:unsetresfn
:unsetres
:unsetdeps
:unsetdepsfn
:clrres
:clrresfn
:target-ids
:fin-target-ids
:lrestrans
:lres
:*trans->propogator-fn*
:*copy-target-table-p*
:defpropogator
:save-target
:load-target
:load-target-manual
:unload-target
:commit-form
:define-save-target-method
:define-load-target-method
:load-object
:save-object
:cleanup
:project-path
:set-project-path
:save-project
:load-project
:checkout-version
:logres-ignore
:logres-ignorefn
:logres-ignore-by
:logres-track
:logres-trackfn
:logres-track-by
:function-target?
:printable
:current-path
:target-path
:work-path
Snapshot Control :
:save-snapshot
:load-snapshot
:defcache
:init-logged-stats
:open-cache
:singleton-cache
:fixed-cache
Utilities :
:checkres
:pruneres
:purgeres
:printres
:mvres
:evres
:evresfn))
(cl-ana.gmath:use-gmath :cl-ana.makeres)
|
3be2bd054df4e826776f82f417da3b8019451825ff97368accba9008519a54a1 | theschemer/libra | http-cgi.scm | ;;; "http-cgi.scm" service HTTP or CGI requests. -*-scheme-*-
;Copyright (C) 1997, 1998, 2000, 2001, 2003 Aubrey Jaffer
;
;Permission to copy this software, to modify it, to redistribute it,
;to distribute modified versions, and to use it for any purpose is
;granted, subject to the following restrictions and understandings.
;
;1.  Any copy made of this software must include this copyright notice
;in full.
;
;2.  I have made no warranty or representation that the operation of
;this software will be error-free, and I am under no obligation to
;provide any services, by way of maintenance, update, or otherwise.
;
;3.  In conjunction with products arising from the use of this
;material, there shall be no use of my name in any advertising,
;promotional, or sales literature without prior written consent in
;each case.
(library (libra server http-cgi)
(export
http:read-header
http:read-query-string
http:status-line
http:header
http:content
http:error-page
http:serve-query
http:read-start-line
http:read-request-line
html:head
html:body)
(import
(scheme)
(socket socket)
(irregex irregex))
;;@code{(require 'http)} or @code{(require 'cgi)}
;;@ftindex http
;;@ftindex cgi
(define http:crlf (string (integer->char 13) #\newline))
;;Reads header fields from @1 up to the first empty (or lone-whitespace)
;;line, returning an alist mapping lower/upper-cased field-name symbols
;;to their string values, or an eof object if EOF is reached first.
;;Bug fix: the eof check now precedes the string-length checks, so an
;;early EOF no longer raises an error inside string-length.
(define (http:read-header port)
  (define alist '())
  (do ((line (read-line port) (read-line port)))
      ((or (eof-object? line)
           (zero? (string-length line))
           (and (= 1 (string-length line))
                (char-whitespace? (string-ref line 0))))
       ;; A lone whitespace char (usually #\return) reveals the peer's
       ;; line terminator; remember it for composing responses.
       (if (and (not (eof-object? line))
                (= 1 (string-length line))
                (char-whitespace? (string-ref line 0)))
           (set! http:crlf (string (string-ref line 0) #\newline)))
       (if (eof-object? line) line alist))
    (let ((len (string-length line))
          (idx (string-index line #\:)))
      ;; Drop a single trailing whitespace char (usually #\return).
      (if (char-whitespace? (string-ref line (+ -1 len)))
          (set! len (+ -1 len)))
      (and idx
           ;; Skip whitespace after the colon, then record the field.
           (do ((idx2 (+ idx 1) (+ idx2 1)))
               ((or (>= idx2 len)
                    (not (char-whitespace? (string-ref line idx2))))
                (set! alist
                      (cons
                       (cons (string-ci->symbol (substring line 0 idx))
                             (substring line idx2 len))
                       alist))))))))
;;Extracts the query string for a request.  For GET/HEAD it is the part
;;of the Request-URI after @samp{?} (a trailing @samp{/} is dropped);
;;for POST/PUT/DELETE it is Content-Length characters read from @3.
;;Returns #f when there is no query or the method is unrecognized.
(define (http:read-query-string request-line header port)
  (case (car request-line)
    ((get head)
     (let* ((request-uri (cadr request-line))
            (len (string-length request-uri)))
       (and
        (> len 3)
        (string-index request-uri #\?)
        (substring request-uri
                   (+ 1 (string-index request-uri #\?))
                   (if (eqv? #\/ (string-ref request-uri (+ -1 len)))
                       (+ -1 len)
                       len)))))
    ((post put delete)
     (let ((content-length (assq 'content-length header)))
       (and
        content-length
        (set! content-length (string->number (cdr content-length))))
       (and
        content-length
        (let ((str (make-string content-length #\space)))
          ;; Read exactly content-length chars; on a short read the
          ;; result is truncated to what actually arrived.
          (do ((idx 0 (+ idx 1)))
              ((>= idx content-length)
               (if (>= idx (string-length str))
                   str
                   (substring str 0 idx)))
            (let ((chr (read-char port)))
              (if (char? chr)
                  (string-set! str idx chr)
                  (set! content-length idx))))))))
    (else #f)))
;;Builds an HTTP/1.0 status line, e.g. "HTTP/1.0 200 OK" + CRLF.
(define (http:status-line status-code reason)
  (string-append "HTTP/1.0 "
                 (format "~a ~a" status-code reason)
                 http:crlf))
;;@body Returns a string containing lines for each element of @1; the
;;@code{car} of which is followed by @samp{: }, then the @code{cdr}.
;;@body Returns a string containing one "Name: value" line per element
;;of @1, terminated by a blank line (the end-of-header marker).
(define (http:header alist)
  (define (field pair)
    (format "~a: ~a~a" (car pair) (cdr pair) http:crlf))
  (string-append (apply string-append (map field alist))
                 http:crlf))
;;@body Returns the concatenation of strings @2 with the
;;@code{(http:header @1)} and the @samp{Content-Length} prepended.
;;@body Returns the concatenation of strings @2 preceded by
;;@code{(http:header @1)} with a @samp{Content-Length} field prepended.
;;The length is counted in UTF-8 bytes of the body, not characters.
(define (http:content alist . body)
  (define hunk (apply string-append body))
  (string-append
   (http:header
    (cons
     (cons
      "Content-Length"
      (number->string (bytevector-length (string->utf8 hunk))))
     alist))
   hunk))
;;String appearing at the bottom of error pages.
(define *http:byline* #f)
@body @1 and @2 should be an integer and string as specified in
@cite{RFC 2068 } . The returned page ( string ) will show the @1 and @2
and any additional @3 @dots { } ; with @var{*http : byline * } or SLIB 's
;;default at the bottom.
;;@body Builds a complete HTTP error response (status line + headers +
;;HTML body) for @1/@2, with any extra @3 ... strings inserted verbatim
;;and @var{*http:byline*} (or the default byline) at the bottom.
(define (http:error-page status-code reason-phrase . html-strings)
  (define byline
    (or
     *http:byline*
     "Libra HTTP/1.0 Server"))
  (string-append
   (http:status-line status-code reason-phrase)
   (http:content
    '(("Content-Type" . "text/html"))
    (html:head (format "~a ~a" status-code reason-phrase))
    (apply html:body
           (append html-strings
                   (list (format "<HR>\n~a\n" byline)))))))
@body reads the @dfn{URI } and @dfn{query - string } from @2 . If the
query is a valid @samp{"POST " } or " } query , then @0 calls
@1 with three arguments , the @var{request - line } , @var{query - string } ,
and @var{header - alist } . Otherwise , @0 calls @1 with the
@var{request - line } , # f , and @var{header - alist } .
;;
;;If @1 returns a string, it is sent to @3. If @1 returns a list
whose first element is an integer , then an error page with the
status integer which is the first element of the list and strings
from the list . If @1 returns a list whose first element is n't an
number , then an error page with the status code 500 and strings from
the list . If @1 returns # f , then a @samp{Bad Request } ( 400 ) page is
;;sent to @3.
;;
Otherwise , @0 replies ( to @3 ) with appropriate HTML describing the
;;problem.
;;@body Reads one request from @2, dispatches it through
;;@code{http:service} with @1, and writes the reply back to the socket.
;;The whole request is read eagerly and decoded as UTF-8; string
;;replies are UTF-8 encoded before writing, bytevector replies are
;;written as-is.
;;NOTE(review): on a truncated request, http:read-header can return an
;;eof object, which is truthy here -- confirm downstream handling.
(define (http:serve-query serve-proc client-socket)
  (let* ([input-port (make-input-port (lambda x (void)) (utf8->string (socket:read client-socket)))]
         [request-line (http:read-request-line input-port)]
         [header (and request-line (http:read-header input-port))]
         [query-string (and header (http:read-query-string
                                    request-line header input-port))]
         [rst (http:service serve-proc request-line query-string header)])
    (socket:write client-socket (if (bytevector? rst) rst (string->utf8 rst)))))
;;Validates the request, then calls @1 with the request-line,
;;query-string and header alist.  A bytevector reply is returned as-is;
;;a string reply is wrapped with a 200 status line; a list reply is
;;turned into an error page (status from its head when numeric, 500
;;otherwise); #f falls through to a 400 or 500 page.
(define (http:service serve-proc request-line query-string header)
  (cond
   ((not request-line)
    (http:error-page 400 "Bad Request."))
   ;; A string method means request-split could not parse a symbol.
   ((string? (car request-line))
    (http:error-page 501 "Not Implemented" (html:plain request-line)))
   ((not (memq (car request-line) '(get post)))
    (http:error-page 405 "Method Not Allowed" (html:plain request-line)))
   ((serve-proc request-line query-string header) =>
    (lambda (reply)
      (cond
       ((bytevector? reply)
        reply)
       ((string? reply)
        (string-append (http:status-line 200 "OK")
                       reply))
       ((and (pair? reply) (list? reply))
        (if (number? (car reply))
            (apply http:error-page reply)
            (apply http:error-page (cons 500 reply))))
       (else (http:error-page 500 "Internal Server Error")))))
   ((not query-string)
    (http:error-page 400 "Bad Request" (html:plain request-line)))
   (else
    ;; NOTE(review): html:plain is given a non-string (list/alist) on
    ;; these branches -- confirm it accepts such values.
    (http:error-page 500 "Internal Server Error" (html:plain header)))))
;;Skips leading empty lines and returns the first non-empty line from
;;@1 (or the eof object).
(define (http:read-start-line port)
  (let loop ((line (read-line port)))
    (if (or (not (equal? "" line)) (eof-object? line))
        line
        (loop (read-line port)))))
;; @body
;; Request lines are a list of three items:
;;
;; @enumerate 0
;;
;; @item Method
;;
A symbol ( @code{options } , @code{get } , } , @code{post } ,
@code{put } , , } @dots { } ) .
;;
;; @item Request-URI
;;
;; A string. For direct HTTP, at the minimum it will be the string
;; @samp{"/"}.
;;
;; @item HTTP-Version
;;
A string . For example , } .
;; @end enumerate
;;Parses the request line into (method-symbol request-uri http-version),
;;or returns #f when the line does not split into exactly three fields.
;;NOTE(review): if the port is at EOF, request-split receives the eof
;;object and will raise inside string-length -- confirm callers avoid
;;this case.
(define (http:read-request-line port)
  (let ((lst (request-split (http:read-start-line port))))
    (and
     (list? lst)
     (= 3 (length lst))
     (cons (string-ci->symbol (car lst)) (cdr lst)))))
;; -------------------- * helper * --------------------
(define html:blank (string->symbol ""))
;;@body Returns a string with character substitutions appropriate to
;;send @1 as an @dfn{plain-text}.
plain - text ` Data Characters '
(cond
((eq? html:blank txt) " ")
(else
(if (symbol? txt) (set! txt (symbol->string txt)))
(if (number? txt)
(number->string txt)
(irregex-replace
">"
(irregex-replace
"<"
(irregex-replace
"&"
txt
"&")
"<")
">")
))))
@args title backlink tags ...
@args title backlink
@args title
;;
Returns header string for an HTML page named @1 . If @2 is a string ,
it is used verbatim between the } tags ; otherwise @1 is
;;used. If string arguments @3 ... are supplied, then they are
;;included verbatim within the @t{<HEAD>} section.
;;@args title backlink tags ...
;;@args title backlink
;;@args title
;;
;;Returns the opening of an HTML page named @1.  When @2 (a string) is
;;supplied it is emitted verbatim in place of the default
;;@t{<BODY><H1>...} opening; remaining @3 ... strings go inside
;;@t{<HEAD>} verbatim.
(define (html:head title . args)
  ;; First optional arg is the backlink; the rest are head tags.
  (define backlink (if (null? args) #f (car args)))
  (if (not (null? args)) (set! args (cdr args)))
  (string-append
   (format "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n")
   (format "<HTML>\n")
   (format " <HEAD>\n <TITLE>~a</TITLE>\n ~a\n </HEAD>\n"
           (html:plain title) (apply string-append args))
   (if backlink
       backlink
       (format "<BODY><H1>~a</H1>\n" (html:plain title)))))
;;@body Returns HTML string to end a page.
;;@body Returns the given strings concatenated and terminated by the
;;closing BODY/HTML tags.
(define (html:body . body)
  (string-append (apply string-append body)
                 (format "</BODY>\n</HTML>\n")))
@args
@args port
;;Returns a string of the characters up to, but not including a
newline or end of file , updating @var{port } to point to the
;;character following the newline. If no characters are available, an
end of file object is returned . The @var{port } argument may be
;;omitted, in which case it defaults to the value returned by
;;@code{current-input-port}.
;;@args
;;@args port
;;Returns the characters up to, but not including, a newline (with a
;;trailing carriage return stripped by clean-line), advancing @var{port}
;;past the newline; returns an eof object when no characters remain.
;;@var{port} defaults to @code{(current-input-port)}.
(define (read-line . port)
  (let* ((char (apply read-char port)))
    (if (eof-object? char)
        char
        (do ((char char (apply read-char port))
             (clist '() (cons char clist)))
            ((or (eof-object? char) (char=? #\newline char))
             (clean-line (list->string (reverse clist))))))))
;;Splits @var{line} on single space characters, returning the fields in
;;order.  Consecutive spaces produce empty-string fields; the result
;;always has at least one element.
(define (request-split line)
  (define lst '())
  (define len (string-length line))
  (let loop ([beg 0] [end 0])
    (cond
     ((> end len)
      (reverse lst))
     ;; Field boundary: either a space or the end of the string.
     ((or (= end len) (char=? #\space (string-ref line end)))
      (set! lst (cons (substring line beg end) lst))
      (loop (+ end 1) (+ end 1)))
     (else (loop beg (+ end 1)))
     )
    )
  )
;;Folds @var{str} to the host implementation's canonical symbol case
;;(probed once via 'x) and interns it as a symbol.
(define string-ci->symbol
  (let ((s2cis (if (equal? "x" (symbol->string 'x))
                   string-downcase string-upcase)))
    (lambda (str) (string->symbol (s2cis str)))))
;;Returns the index of the first occurrence of @var{chr} in @var{str},
;;or #f when it does not occur.
(define (string-index str chr)
  (let ((len (string-length str)))
    (let loop ((pos 0))
      (cond ((>= pos len) #f)
            ((char=? chr (string-ref str pos)) pos)
            (else (loop (+ 1 pos)))))))
;;Strips a single trailing carriage return from @var{str}, if present.
;;Bug fix: guard the index so the empty string no longer raises an
;;out-of-range error in string-ref (read-line can produce "" for a
;;bare-LF blank line).
(define (clean-line str)
  (define idx (- (string-length str) 1))
  (if (and (>= idx 0) (char=? (string-ref str idx) #\return))
      (substring str 0 idx)
      str))
)
| null | https://raw.githubusercontent.com/theschemer/libra/3395b0b0550df264e45b7073d35aa29f85395d20/libra/server/http-cgi.scm | scheme | "http-cgi.scm" service HTTP or CGI requests. -*-scheme-*-
Permission to copy this software, to modify it, to redistribute it,
to distribute modified versions, and to use it for any purpose is
granted, subject to the following restrictions and understandings.
in full.
this software will be error-free, and I am under no obligation to
provide any services, by way of maintenance, update, or otherwise.
material, there shall be no use of my name in any advertising,
promotional, or sales literature without prior written consent in
each case.
@code{(require 'http)} or @code{(require 'cgi)}
@ftindex http
@ftindex cgi
@body Returns a string containing lines for each element of @1; the
@code{car} of which is followed by @samp{: }, then the @code{cdr}.
@code{(http:header @1)} and the @samp{Content-Length} prepended.
(string-length hunk)
with @var{*http : byline * } or SLIB 's
default at the bottom.
If @1 returns a string, it is sent to @3. If @1 returns a list
sent to @3.
problem.
@body
@enumerate 0
@item Method
@item Request-URI
A string. For direct HTTP, at the minimum it will be the string
@samp{"/"}.
@item HTTP-Version
@end enumerate
-------------------- * helper * --------------------
@body Returns a string with character substitutions appropriate to
send @1 as an @dfn{plain-text}.
otherwise @1 is
used. If string arguments @3 ... are supplied, then they are
included verbatim within the @t{<HEAD>} section.
@body Returns HTML string to end a page.
Returns a string of the characters up to, but not including a
character following the newline. If no characters are available, an
omitted, in which case it defaults to the value returned by
@code{current-input-port}. | Copyright 1997 , 1998 , 2000 , 2001 , 2003
1 . Any copy made of this software must include this copyright notice
2 . I have made no warranty or representation that the operation of
3 . In conjunction with products arising from the use of this
(library (libra server http-cgi)
(export
http:read-header
http:read-query-string
http:status-line
http:header
http:content
http:error-page
http:serve-query
http:read-start-line
http:read-request-line
html:head
html:body)
(import
(scheme)
(socket socket)
(irregex irregex))
(define http:crlf (string (integer->char 13) #\newline))
(define (http:read-header port)
(define alist '())
(do ((line (read-line port) (read-line port)))
((or (zero? (string-length line))
(and (= 1 (string-length line))
(char-whitespace? (string-ref line 0)))
(eof-object? line))
(if (and (= 1 (string-length line))
(char-whitespace? (string-ref line 0)))
(set! http:crlf (string (string-ref line 0) #\newline)))
(if (eof-object? line) line alist))
(let ((len (string-length line))
(idx (string-index line #\:)))
(if (char-whitespace? (string-ref line (+ -1 len)))
(set! len (+ -1 len)))
(and idx
(do ((idx2 (+ idx 1) (+ idx2 1)))
((or (>= idx2 len)
(not (char-whitespace? (string-ref line idx2))))
(set! alist
(cons
(cons (string-ci->symbol (substring line 0 idx))
(substring line idx2 len))
alist)))
)
)
)
)
)
(define (http:read-query-string request-line header port)
(case (car request-line)
((get head)
(let* ((request-uri (cadr request-line))
(len (string-length request-uri)))
(and
(> len 3)
(string-index request-uri #\?)
(substring request-uri
(+ 1 (string-index request-uri #\?))
(if (eqv? #\/ (string-ref request-uri (+ -1 len)))
(+ -1 len)
len)))))
((post put delete)
(let ((content-length (assq 'content-length header)))
(and
content-length
(set! content-length (string->number (cdr content-length))))
(and
content-length
(let ((str (make-string content-length #\space)))
(do ((idx 0 (+ idx 1)))
((>= idx content-length)
(if (>= idx (string-length str))
str
(substring str 0 idx)))
(let ((chr (read-char port)))
(if (char? chr)
(string-set! str idx chr)
(set! content-length idx))))))))
(else #f)))
(define (http:status-line status-code reason)
(format "HTTP/1.0 ~a ~a~a" status-code reason http:crlf))
(define (http:header alist)
(string-append
(apply
string-append
(map
(lambda (pair)
(format "~a: ~a~a" (car pair) (cdr pair) http:crlf))
alist))
http:crlf))
@body Returns the concatenation of strings @2 with the
(define (http:content alist . body)
(define hunk (apply string-append body))
(string-append
(http:header
(cons
(cons
"Content-Length"
alist))
hunk))
appearing at the bottom of error pages .
(define *http:byline* #f)
@body @1 and @2 should be an integer and string as specified in
@cite{RFC 2068 } . The returned page ( string ) will show the @1 and @2
(define (http:error-page status-code reason-phrase . html-strings)
(define byline
(or
*http:byline*
"Libra HTTP/1.0 Server"))
(string-append
(http:status-line status-code reason-phrase)
(http:content
'(("Content-Type" . "text/html"))
(html:head (format "~a ~a" status-code reason-phrase))
(apply html:body
(append html-strings
(list (format "<HR>\n~a\n" byline)))))))
@body reads the @dfn{URI } and @dfn{query - string } from @2 . If the
query is a valid @samp{"POST " } or " } query , then @0 calls
@1 with three arguments , the @var{request - line } , @var{query - string } ,
and @var{header - alist } . Otherwise , @0 calls @1 with the
@var{request - line } , # f , and @var{header - alist } .
whose first element is an integer , then an error page with the
status integer which is the first element of the list and strings
from the list . If @1 returns a list whose first element is n't an
number , then an error page with the status code 500 and strings from
the list . If @1 returns # f , then a @samp{Bad Request } ( 400 ) page is
Otherwise , @0 replies ( to @3 ) with appropriate HTML describing the
(define (http:serve-query serve-proc client-socket)
(let* ([input-port (make-input-port (lambda x (void)) (utf8->string (socket:read client-socket)))]
[request-line (http:read-request-line input-port)]
[header (and request-line (http:read-header input-port))]
[query-string (and header (http:read-query-string
request-line header input-port))]
[rst (http:service serve-proc request-line query-string header)])
(socket:write client-socket (if (bytevector? rst) rst (string->utf8 rst)))))
(define (http:service serve-proc request-line query-string header)
(cond
((not request-line)
(http:error-page 400 "Bad Request."))
((string? (car request-line))
(http:error-page 501 "Not Implemented" (html:plain request-line)))
((not (memq (car request-line) '(get post)))
(http:error-page 405 "Method Not Allowed" (html:plain request-line)))
((serve-proc request-line query-string header) =>
(lambda (reply)
(cond
((bytevector? reply)
reply)
((string? reply)
(string-append (http:status-line 200 "OK")
reply))
((and (pair? reply) (list? reply))
(if (number? (car reply))
(apply http:error-page reply)
(apply http:error-page (cons 500 reply))))
(else (http:error-page 500 "Internal Server Error")))))
((not query-string)
(http:error-page 400 "Bad Request" (html:plain request-line)))
(else
(http:error-page 500 "Internal Server Error" (html:plain header)))))
(define (http:read-start-line port)
(do ((line (read-line port) (read-line port)))
((or (not (equal? "" line)) (eof-object? line)) line)))
Request lines are a list of three itmes :
A symbol ( @code{options } , @code{get } , } , @code{post } ,
@code{put } , , } @dots { } ) .
A string . For example , } .
(define (http:read-request-line port)
(let ((lst (request-split (http:read-start-line port))))
(and
(list? lst)
(= 3 (length lst))
(cons (string-ci->symbol (car lst)) (cdr lst)))))
(define html:blank (string->symbol ""))
plain - text ` Data Characters '
(cond
((eq? html:blank txt) " ")
(else
(if (symbol? txt) (set! txt (symbol->string txt)))
(if (number? txt)
(number->string txt)
(irregex-replace
">"
(irregex-replace
"<"
(irregex-replace
"&"
txt
"&")
"<")
">")
))))
@args title backlink tags ...
@args title backlink
@args title
Returns header string for an HTML page named @1 . If @2 is a string ,
(define (html:head title . args)
(define backlink (if (null? args) #f (car args)))
(if (not (null? args)) (set! args (cdr args)))
(string-append
(format "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n")
(format "<HTML>\n")
(format " <HEAD>\n <TITLE>~a</TITLE>\n ~a\n </HEAD>\n"
(html:plain title) (apply string-append args))
(if backlink
backlink
(format "<BODY><H1>~a</H1>\n" (html:plain title)))))
(define (html:body . body)
(apply string-append
(append body (list (format "</BODY>\n</HTML>\n")))))
@args
@args port
newline or end of file , updating @var{port } to point to the
end of file object is returned . The @var{port } argument may be
(define (read-line . port)
(let* ((char (apply read-char port)))
(if (eof-object? char)
char
(do ((char char (apply read-char port))
(clist '() (cons char clist)))
((or (eof-object? char) (char=? #\newline char))
(clean-line (list->string (reverse clist))))))))
(define (request-split line)
(define lst '())
(define len (string-length line))
(let loop ([beg 0] [end 0])
(cond
((> end len)
(reverse lst))
((or (= end len) (char=? #\space (string-ref line end)))
(set! lst (cons (substring line beg end) lst))
(loop (+ end 1) (+ end 1)))
(else (loop beg (+ end 1)))
)
)
)
(define string-ci->symbol
(let ((s2cis (if (equal? "x" (symbol->string 'x))
string-downcase string-upcase)))
(lambda (str) (string->symbol (s2cis str)))))
(define (string-index str chr)
(define len (string-length str))
(do ((pos 0 (+ 1 pos)))
((or (>= pos len) (char=? chr (string-ref str pos)))
(and (< pos len) pos))))
(define (clean-line str)
(define idx (- (string-length str) 1))
(if (char=? (string-ref str idx) #\return)
(substring str 0 idx)
str))
)
|
1ea28abced743a0d128fbe1b351c8b32b8ddc0b1ede3780b479c6119247af649 | tschady/advent-of-code | d21.clj | (ns aoc.2015.d21
(:require [aoc.file-util :as file-util]
[clojure.math.combinatorics :as combo]))
(def input (file-util/read-ints "2015/d21.txt"))
(def hero-hp 100)
(defn parse-boss [input] (zipmap [:hp :damage :armor] input))
(def shop
  "Item catalog from the puzzle. Zero-cost, zero-stat dummy entries in
  :armor and :rings stand in for \"buy nothing\", so a plain cartesian
  product over the lists enumerates every legal loadout."
  {:weapons [{:cost 8 :damage 4 :armor 0}
             {:cost 10 :damage 5 :armor 0}
             {:cost 25 :damage 6 :armor 0}
             {:cost 40 :damage 7 :armor 0}
             {:cost 74 :damage 8 :armor 0}]
   :armor [{:cost 0 :damage 0 :armor 0} ; = no purchase
           {:cost 13 :damage 0 :armor 1}
           {:cost 31 :damage 0 :armor 2}
           {:cost 53 :damage 0 :armor 3}
           {:cost 75 :damage 0 :armor 4}
           {:cost 102 :damage 0 :armor 5}]
   :rings [{:cost 0 :damage 0 :armor 0} ; = no purchase
           {:cost 0 :damage 0 :armor 0} ; = no purchase (second empty slot)
           {:cost 25 :damage 1 :armor 0}
           {:cost 50 :damage 2 :armor 0}
           {:cost 100 :damage 3 :armor 0}
           {:cost 20 :damage 0 :armor 1}
           {:cost 40 :damage 0 :armor 2}
           {:cost 80 :damage 0 :armor 3}]})
(defn hero-variants
  "Returns net stats of all possible combinations of hero's equipment purchases
  from given `shop` for constraints: 1 weapon, 0-1 armor, 0-2 rings"
  [hp shop]
  ;; The zero-cost dummy items in `shop` represent "no purchase", so one
  ;; weapon + one armor + one unordered ring pair covers every loadout.
  ;; Stats of the four picks are summed key-wise; :hp is added last.
  (for [weapon (:weapons shop)
        armor (:armor shop)
        [ring1 ring2] (combo/combinations (:rings shop) 2)]
    (assoc (merge-with + weapon armor ring1 ring2) :hp hp)))
(defn- turns-to-kill
  "Attack rounds needed to reduce `hp` to zero when each hit deals
  (damage - armor), but always at least 1 point. Returns a double."
  [hp damage armor]
  (let [hit-per-turn (max 1 (- damage armor))]
    (Math/ceil (/ hp hit-per-turn))))
(defn- hero-wins?
  "True when the hero needs no more turns to kill the boss than the boss
  needs to kill the hero; the hero strikes first, so ties favor the hero."
  [boss hero]
  (<= (turns-to-kill (:hp boss) (:damage hero) (:armor boss))
      (turns-to-kill (:hp hero) (:damage boss) (:armor hero))))
(defn part-1
  "Cheapest equipment cost that still beats the boss."
  [input hero-hp]
  (let [boss (parse-boss input)]
    (apply min
           (for [hero (hero-variants hero-hp shop)
                 :when (hero-wins? boss hero)]
             (:cost hero)))))
(defn part-2
  "Priciest equipment cost that still loses to the boss."
  [input hero-hp]
  (let [boss (parse-boss input)]
    (apply max
           (for [hero (hero-variants hero-hp shop)
                 :when (not (hero-wins? boss hero))]
             (:cost hero)))))
| null | https://raw.githubusercontent.com/tschady/advent-of-code/63db07edf0c60f2a575f9f4a1852d3b06dbb82f9/src/aoc/2015/d21.clj | clojure | = no purchase
= no purchase
= no purchase | (ns aoc.2015.d21
(:require [aoc.file-util :as file-util]
[clojure.math.combinatorics :as combo]))
(def input (file-util/read-ints "2015/d21.txt"))
(def hero-hp 100)
(defn parse-boss [input] (zipmap [:hp :damage :armor] input))
(def shop {:weapons [{:cost 8 :damage 4 :armor 0}
{:cost 10 :damage 5 :armor 0}
{:cost 25 :damage 6 :armor 0}
{:cost 40 :damage 7 :armor 0}
{:cost 74 :damage 8 :armor 0}]
{:cost 13 :damage 0 :armor 1}
{:cost 31 :damage 0 :armor 2}
{:cost 53 :damage 0 :armor 3}
{:cost 75 :damage 0 :armor 4}
{:cost 102 :damage 0 :armor 5}]
{:cost 25 :damage 1 :armor 0}
{:cost 50 :damage 2 :armor 0}
{:cost 100 :damage 3 :armor 0}
{:cost 20 :damage 0 :armor 1}
{:cost 40 :damage 0 :armor 2}
{:cost 80 :damage 0 :armor 3}]})
(defn hero-variants
"Returns net stats of all possible combinations of hero's equipment purchases
from given `shop` for constraints: 1 weapon, 0-1 armor, 0-2 rings"
[hp shop]
(for [weapon (:weapons shop)
armor (:armor shop)
[ring1 ring2] (combo/combinations (:rings shop) 2)]
(assoc (merge-with + weapon armor ring1 ring2) :hp hp)))
(defn- turns-to-kill [hp damage armor]
(let [hit-per-turn (max 1 (- damage armor))]
(Math/ceil (/ hp hit-per-turn))))
(defn- hero-wins? [boss hero]
(<= (turns-to-kill (:hp boss) (:damage hero) (:armor boss))
(turns-to-kill (:hp hero) (:damage boss) (:armor hero))))
(defn part-1 [input hero-hp]
(let [boss (parse-boss input)
heros (hero-variants hero-hp shop)]
(->> heros
(filter #(hero-wins? boss %))
(map :cost)
(apply min))))
(defn part-2 [input hero-hp]
(let [boss (parse-boss input)
heros (hero-variants hero-hp shop)]
(->> heros
(remove #(hero-wins? boss %))
(map :cost)
(apply max))))
|
e8dd15d62dd1125f850661184b533b30be0247f639813f135e410775f0a1b565 | tachukao/ocaml-mujoco | fun_gen.ml | open Base
open Common
let write_stubs ~stubs_filename s =
let sections = parse_sect s in
Stdio.Out_channel.with_file stubs_filename ~f:(fun ch ->
let ps = p ch in
p_auto ch;
ps "open Ctypes";
ps "module Typs = Typs";
ps "open Typs";
ps "module Bindings (F : FOREIGN) = struct";
ps "open F\n";
List.iter sections ~f:(fun section ->
match section with
| `Delim delim -> p ch "%s\n" (convert_docstr Re.Group.(get delim 1))
| `Text s ->
List.iter (parse_func s) ~f:(fun cfunc ->
p ch "%s" cfunc.docstr;
p
ch
"let %s = foreign \"%s\" (%s @-> returning %s)"
cfunc.func
cfunc.func
String.(concat ~sep:" @-> " cfunc.args)
cfunc.rval;
p ch "\n"));
ps "end")
let write stubs_filename = snd (read_file "mujoco.h") |> write_stubs ~stubs_filename
| null | https://raw.githubusercontent.com/tachukao/ocaml-mujoco/a17bba51e7a0166caa6a8693ac1d9178dd004a38/src/autogen/fun_gen.ml | ocaml | open Base
open Common
let write_stubs ~stubs_filename s =
let sections = parse_sect s in
Stdio.Out_channel.with_file stubs_filename ~f:(fun ch ->
let ps = p ch in
p_auto ch;
ps "open Ctypes";
ps "module Typs = Typs";
ps "open Typs";
ps "module Bindings (F : FOREIGN) = struct";
ps "open F\n";
List.iter sections ~f:(fun section ->
match section with
| `Delim delim -> p ch "%s\n" (convert_docstr Re.Group.(get delim 1))
| `Text s ->
List.iter (parse_func s) ~f:(fun cfunc ->
p ch "%s" cfunc.docstr;
p
ch
"let %s = foreign \"%s\" (%s @-> returning %s)"
cfunc.func
cfunc.func
String.(concat ~sep:" @-> " cfunc.args)
cfunc.rval;
p ch "\n"));
ps "end")
let write stubs_filename = snd (read_file "mujoco.h") |> write_stubs ~stubs_filename
| |
985b67246984d2fe2393f91f3c7c15ec039ce43ac0492e008548be794822c172 | theodormoroianu/SecondYearCourses | HaskellChurch_20210415163005.hs | {-# LANGUAGE RankNTypes #-}
module HaskellChurch where
A boolean is any way to choose between two alternatives
newtype CBool = CBool {cIf :: forall t. t -> t -> t}
An instance to show as regular Booleans
instance Show CBool where
show b = show $ cIf b True False
The boolean constant true always chooses the first alternative
cTrue :: CBool
cTrue = undefined
The boolean constant false always chooses the second alternative
cFalse :: CBool
cFalse = undefined
--The boolean negation switches the alternatives
cNot :: CBool -> CBool
cNot = undefined
--The boolean conjunction can be built as a conditional
(&&:) :: CBool -> CBool -> CBool
(&&:) = undefined
infixr 3 &&:
--The boolean disjunction can be built as a conditional
(||:) :: CBool -> CBool -> CBool
(||:) = undefined
infixr 2 ||:
-- a pair is a way to compute something based on the values
-- contained within the pair.
newtype CPair a b = CPair { cOn :: forall c . (a -> b -> c) -> c }
An instance to show CPairs as regular pairs .
instance (Show a, Show b) => Show (CPair a b) where
show p = show $ cOn p (,)
builds a pair out of two values as an object which , when given
--a function to be applied on the values, it will apply it on them.
cPair :: a -> b -> CPair a b
cPair = undefined
first projection uses the function selecting first component on a pair
cFst :: CPair a b -> a
cFst = undefined
second projection
cSnd :: CPair a b -> b
cSnd = undefined
-- A natural number is any way to iterate a function s a number of times
-- over an initial value z
newtype CNat = CNat { cFor :: forall t. (t -> t) -> t -> t }
-- An instance to show CNats as regular natural numbers
instance Show CNat where
show n = show $ cFor n (1 +) (0 :: Integer)
--0 will iterate the function s 0 times over z, producing z
c0 :: CNat
c0 = undefined
1 is the the function s iterated 1 times over z , that is , z
c1 :: CNat
c1 = undefined
--Successor n either
- applies s one more time in addition to what n does
-- - iterates s n times over (s z)
cS :: CNat -> CNat
cS = undefined
--Addition of m and n is done by iterating s n times over m
(+:) :: CNat -> CNat -> CNat
(+:) = undefined
infixl 6 +:
--Multiplication of m and n can be done by composing n and m
(*:) :: CNat -> CNat -> CNat
(*:) = \n m -> CNat $ cFor n . cFor m
infixl 7 *:
--Exponentiation of m and n can be done by applying n to m
(^:) :: CNat -> CNat -> CNat
(^:) = \m n -> CNat $ cFor n (cFor m)
infixr 8 ^:
--Testing whether a value is 0 can be done through iteration
-- using a function constantly false and an initial value true
cIs0 :: CNat -> CBool
cIs0 = \n -> cFor n (\_ -> cFalse) cTrue
Predecessor ( evaluating to 0 for 0 ) can be defined iterating
over pairs , starting from an initial value ( 0 , 0 )
cPred :: CNat -> CNat
cPred = undefined
substraction from m n ( evaluating to 0 if m < n ) is repeated application
-- of the predeccesor function
(-:) :: CNat -> CNat -> CNat
(-:) = \m n -> cFor n cPred m
Transform a value into a CNat ( should yield c0 for nums < = 0 )
cNat :: (Ord p, Num p) => p -> CNat
cNat n = undefined
We can define an instance Num CNat which will allow us to see any
integer constant as a CNat ( e.g. 12 : : CNat ) and also use regular
-- arithmetic
instance Num CNat where
(+) = (+:)
(*) = (*:)
(-) = (-:)
abs = id
signum n = cIf (cIs0 n) 0 1
fromInteger = cNat
-- m is less than (or equal to) n if when substracting n from m we get 0
(<=:) :: CNat -> CNat -> CBool
(<=:) = undefined
infix 4 <=:
(>=:) :: CNat -> CNat -> CBool
(>=:) = \m n -> n <=: m
infix 4 >=:
(<:) :: CNat -> CNat -> CBool
(<:) = \m n -> cNot (m >=: n)
infix 4 <:
(>:) :: CNat -> CNat -> CBool
(>:) = \m n -> n <: m
infix 4 >:
-- equality on naturals can be defined my means of comparisons
(==:) :: CNat -> CNat -> CBool
(==:) = undefined
--Fun with arithmetic and pairs
--Define factorial. You can iterate over a pair to contain the current index and so far factorial
cFactorial :: CNat -> CNat
cFactorial = undefined
Define Fibonacci . You can iterate over a pair to contain two consecutive numbers in the sequence
cFibonacci :: CNat -> CNat
cFibonacci = undefined
--Given m and n, compute q and r satisfying m = q * n + r. If n is not 0 then r should be less than n.
--hint repeated substraction, iterated for at most m times.
cDivMod :: CNat -> CNat -> CPair CNat CNat
cDivMod = undefined
-- a list is a way to aggregate a sequence of elements given an aggregation function and an initial value.
newtype CList a = CList { cFoldR :: forall b. (a -> b -> b) -> b -> b }
instance Foldable CList where
foldr = undefined
--An instance to show CLists as regular lists.
instance (Show a) => Show (CList a) where
show l = show $ toList l
churchNil :: Term
churchNil = lams ["agg", "init"] (v "init")
cNil :: CList a
cNil = CList $ \agg init -> init
churchCons :: Term
churchCons = lams ["x","l","agg", "init"]
(v "agg"
$$ v "x"
$$ (v "l" $$ v "agg" $$ v "init")
)
(.:) :: a -> CList a -> CList a
(.:) = \x xs -> CList $ \agg init -> agg x (cFoldR xs agg init)
churchList :: [Term] -> Term
churchList = foldr (\x l -> churchCons $$ x $$ l) churchNil
cList :: [a] -> CList a
cList = foldr (.:) cNil
churchNatList :: [Integer] -> Term
churchNatList = churchList . map churchNat
cNatList :: [Integer] -> CList CNat
cNatList = cList . map cNat
churchSum :: Term
churchSum = lam "l" (v "l" $$ churchPlus $$ church0)
cSum :: CList CNat -> CNat
since CList is an instance of Foldable ; otherwise : \l - > cFoldR l ( + ) 0
churchIsNil :: Term
churchIsNil = lam "l" (v "l" $$ lams ["x", "a"] churchFalse $$ churchTrue)
cIsNil :: CList a -> CBool
cIsNil = \l -> cFoldR l (\_ _ -> cFalse) cTrue
churchHead :: Term
churchHead = lams ["l", "default"] (v "l" $$ lams ["x", "a"] (v "x") $$ v "default")
cHead :: CList a -> a -> a
cHead = \l d -> cFoldR l (\x _ -> x) d
churchTail :: Term
churchTail = lam "l" (churchFst $$
(v "l"
$$ lams ["x","p"] (lam "t" (churchPair $$ v "t" $$ (churchCons $$ v "x" $$ v "t"))
$$ (churchSnd $$ v "p"))
$$ (churchPair $$ churchNil $$ churchNil)
))
cTail :: CList a -> CList a
cTail = \l -> cFst $ cFoldR l (\x p -> (\t -> cPair t (x .: t)) (cSnd p)) (cPair cNil cNil)
cLength :: CList a -> CNat
cLength = \l -> cFoldR l (\_ n -> cS n) 0
fix :: Term
fix = lam "f" (lam "x" (v "f" $$ (v "x" $$ v "x")) $$ lam "x" (v "f" $$ (v "x" $$ v "x")))
divmod :: (Enum a, Num a, Ord b, Num b) => b -> b -> (a, b)
divmod m n = divmod' (0, 0)
where
divmod' (x, y)
| x' <= m = divmod' (x', succ y)
| otherwise = (y, m - x)
where x' = x + n
divmod' m n =
if n == 0 then (0, m)
else
Function.fix
(\f p ->
(\x' ->
if x' > 0 then f ((,) (succ (fst p)) x')
else if (<=) n (snd p) then ((,) (succ (fst p)) 0)
else p)
((-) (snd p) n))
(0, m)
churchDivMod' :: Term
churchDivMod' = lams ["m", "n"]
(churchIs0 $$ v "n"
$$ (churchPair $$ church0 $$ v "m")
$$ (fix
$$ lams ["f", "p"]
(lam "x"
(churchIs0 $$ v "x"
$$ (churchLte $$ v "n" $$ (churchSnd $$ v "p")
$$ (churchPair $$ (churchS $$ (churchFst $$ v "p")) $$ church0)
$$ v "p"
)
$$ (v "f" $$ (churchPair $$ (churchS $$ (churchFst $$ v "p")) $$ v "x"))
)
$$ (churchSub $$ (churchSnd $$ v "p") $$ v "n")
)
$$ (churchPair $$ church0 $$ v "m")
)
)
churchSudan :: Term
churchSudan = fix $$ lam "f" (lams ["n", "x", "y"]
(churchIs0 $$ v "n"
$$ (churchPlus $$ v "x" $$ v "y")
$$ (churchIs0 $$ v "y"
$$ v "x"
$$ (lam "fnpy"
(v "f" $$ (churchPred $$ v "n")
$$ v "fnpy"
$$ (churchPlus $$ v "fnpy" $$ v "y")
)
$$ (v "f" $$ v "n" $$ v "x" $$ (churchPred $$ v "y"))
)
)
))
churchAckermann :: Term
churchAckermann = fix $$ lam "A" (lams ["m", "n"]
(churchIs0 $$ v "m"
$$ (churchS $$ v "n")
$$ (churchIs0 $$ v "n"
$$ (v "A" $$ (churchPred $$ v "m") $$ church1)
$$ (v "A" $$ (churchPred $$ v "m")
$$ (v "A" $$ v "m" $$ (churchPred $$ v "n")))
)
)
)
| null | https://raw.githubusercontent.com/theodormoroianu/SecondYearCourses/5e359e6a7cf588a527d27209bf53b4ce6b8d5e83/FLP/Laboratoare/Lab%209/.history/HaskellChurch_20210415163005.hs | haskell | # LANGUAGE RankNTypes #
The boolean negation switches the alternatives
The boolean conjunction can be built as a conditional
The boolean disjunction can be built as a conditional
a pair is a way to compute something based on the values
contained within the pair.
a function to be applied on the values, it will apply it on them.
A natural number is any way to iterate a function s a number of times
over an initial value z
An instance to show CNats as regular natural numbers
0 will iterate the function s 0 times over z, producing z
Successor n either
- iterates s n times over (s z)
Addition of m and n is done by iterating s n times over m
Multiplication of m and n can be done by composing n and m
Exponentiation of m and n can be done by applying n to m
Testing whether a value is 0 can be done through iteration
using a function constantly false and an initial value true
of the predeccesor function
arithmetic
m is less than (or equal to) n if when substracting n from m we get 0
equality on naturals can be defined my means of comparisons
Fun with arithmetic and pairs
Define factorial. You can iterate over a pair to contain the current index and so far factorial
Given m and n, compute q and r satisfying m = q * n + r. If n is not 0 then r should be less than n.
hint repeated substraction, iterated for at most m times.
a list is a way to aggregate a sequence of elements given an aggregation function and an initial value.
An instance to show CLists as regular lists. | module HaskellChurch where
A boolean is any way to choose between two alternatives
newtype CBool = CBool {cIf :: forall t. t -> t -> t}
An instance to show as regular Booleans
instance Show CBool where
show b = show $ cIf b True False
The boolean constant true always chooses the first alternative
cTrue :: CBool
cTrue = undefined
The boolean constant false always chooses the second alternative
cFalse :: CBool
cFalse = undefined
cNot :: CBool -> CBool
cNot = undefined
(&&:) :: CBool -> CBool -> CBool
(&&:) = undefined
infixr 3 &&:
(||:) :: CBool -> CBool -> CBool
(||:) = undefined
infixr 2 ||:
newtype CPair a b = CPair { cOn :: forall c . (a -> b -> c) -> c }
An instance to show CPairs as regular pairs .
instance (Show a, Show b) => Show (CPair a b) where
show p = show $ cOn p (,)
builds a pair out of two values as an object which , when given
cPair :: a -> b -> CPair a b
cPair = undefined
first projection uses the function selecting first component on a pair
cFst :: CPair a b -> a
cFst = undefined
second projection
cSnd :: CPair a b -> b
cSnd = undefined
newtype CNat = CNat { cFor :: forall t. (t -> t) -> t -> t }
instance Show CNat where
show n = show $ cFor n (1 +) (0 :: Integer)
c0 :: CNat
c0 = undefined
1 is the the function s iterated 1 times over z , that is , z
c1 :: CNat
c1 = undefined
- applies s one more time in addition to what n does
cS :: CNat -> CNat
cS = undefined
(+:) :: CNat -> CNat -> CNat
(+:) = undefined
infixl 6 +:
(*:) :: CNat -> CNat -> CNat
(*:) = \n m -> CNat $ cFor n . cFor m
infixl 7 *:
(^:) :: CNat -> CNat -> CNat
(^:) = \m n -> CNat $ cFor n (cFor m)
infixr 8 ^:
cIs0 :: CNat -> CBool
cIs0 = \n -> cFor n (\_ -> cFalse) cTrue
Predecessor ( evaluating to 0 for 0 ) can be defined iterating
over pairs , starting from an initial value ( 0 , 0 )
cPred :: CNat -> CNat
cPred = undefined
substraction from m n ( evaluating to 0 if m < n ) is repeated application
(-:) :: CNat -> CNat -> CNat
(-:) = \m n -> cFor n cPred m
Transform a value into a CNat ( should yield c0 for nums < = 0 )
cNat :: (Ord p, Num p) => p -> CNat
cNat n = undefined
We can define an instance Num CNat which will allow us to see any
integer constant as a CNat ( e.g. 12 : : CNat ) and also use regular
instance Num CNat where
(+) = (+:)
(*) = (*:)
(-) = (-:)
abs = id
signum n = cIf (cIs0 n) 0 1
fromInteger = cNat
(<=:) :: CNat -> CNat -> CBool
(<=:) = undefined
infix 4 <=:
(>=:) :: CNat -> CNat -> CBool
(>=:) = \m n -> n <=: m
infix 4 >=:
(<:) :: CNat -> CNat -> CBool
(<:) = \m n -> cNot (m >=: n)
infix 4 <:
(>:) :: CNat -> CNat -> CBool
(>:) = \m n -> n <: m
infix 4 >:
(==:) :: CNat -> CNat -> CBool
(==:) = undefined
cFactorial :: CNat -> CNat
cFactorial = undefined
Define Fibonacci . You can iterate over a pair to contain two consecutive numbers in the sequence
cFibonacci :: CNat -> CNat
cFibonacci = undefined
cDivMod :: CNat -> CNat -> CPair CNat CNat
cDivMod = undefined
newtype CList a = CList { cFoldR :: forall b. (a -> b -> b) -> b -> b }
instance Foldable CList where
foldr = undefined
instance (Show a) => Show (CList a) where
show l = show $ toList l
churchNil :: Term
churchNil = lams ["agg", "init"] (v "init")
cNil :: CList a
cNil = CList $ \agg init -> init
churchCons :: Term
churchCons = lams ["x","l","agg", "init"]
(v "agg"
$$ v "x"
$$ (v "l" $$ v "agg" $$ v "init")
)
(.:) :: a -> CList a -> CList a
(.:) = \x xs -> CList $ \agg init -> agg x (cFoldR xs agg init)
churchList :: [Term] -> Term
churchList = foldr (\x l -> churchCons $$ x $$ l) churchNil
cList :: [a] -> CList a
cList = foldr (.:) cNil
churchNatList :: [Integer] -> Term
churchNatList = churchList . map churchNat
cNatList :: [Integer] -> CList CNat
cNatList = cList . map cNat
churchSum :: Term
churchSum = lam "l" (v "l" $$ churchPlus $$ church0)
cSum :: CList CNat -> CNat
since CList is an instance of Foldable ; otherwise : \l - > cFoldR l ( + ) 0
churchIsNil :: Term
churchIsNil = lam "l" (v "l" $$ lams ["x", "a"] churchFalse $$ churchTrue)
cIsNil :: CList a -> CBool
cIsNil = \l -> cFoldR l (\_ _ -> cFalse) cTrue
churchHead :: Term
churchHead = lams ["l", "default"] (v "l" $$ lams ["x", "a"] (v "x") $$ v "default")
cHead :: CList a -> a -> a
cHead = \l d -> cFoldR l (\x _ -> x) d
churchTail :: Term
churchTail = lam "l" (churchFst $$
(v "l"
$$ lams ["x","p"] (lam "t" (churchPair $$ v "t" $$ (churchCons $$ v "x" $$ v "t"))
$$ (churchSnd $$ v "p"))
$$ (churchPair $$ churchNil $$ churchNil)
))
cTail :: CList a -> CList a
cTail = \l -> cFst $ cFoldR l (\x p -> (\t -> cPair t (x .: t)) (cSnd p)) (cPair cNil cNil)
cLength :: CList a -> CNat
cLength = \l -> cFoldR l (\_ n -> cS n) 0
fix :: Term
fix = lam "f" (lam "x" (v "f" $$ (v "x" $$ v "x")) $$ lam "x" (v "f" $$ (v "x" $$ v "x")))
divmod :: (Enum a, Num a, Ord b, Num b) => b -> b -> (a, b)
divmod m n = divmod' (0, 0)
where
divmod' (x, y)
| x' <= m = divmod' (x', succ y)
| otherwise = (y, m - x)
where x' = x + n
divmod' m n =
if n == 0 then (0, m)
else
Function.fix
(\f p ->
(\x' ->
if x' > 0 then f ((,) (succ (fst p)) x')
else if (<=) n (snd p) then ((,) (succ (fst p)) 0)
else p)
((-) (snd p) n))
(0, m)
churchDivMod' :: Term
churchDivMod' = lams ["m", "n"]
(churchIs0 $$ v "n"
$$ (churchPair $$ church0 $$ v "m")
$$ (fix
$$ lams ["f", "p"]
(lam "x"
(churchIs0 $$ v "x"
$$ (churchLte $$ v "n" $$ (churchSnd $$ v "p")
$$ (churchPair $$ (churchS $$ (churchFst $$ v "p")) $$ church0)
$$ v "p"
)
$$ (v "f" $$ (churchPair $$ (churchS $$ (churchFst $$ v "p")) $$ v "x"))
)
$$ (churchSub $$ (churchSnd $$ v "p") $$ v "n")
)
$$ (churchPair $$ church0 $$ v "m")
)
)
churchSudan :: Term
churchSudan = fix $$ lam "f" (lams ["n", "x", "y"]
(churchIs0 $$ v "n"
$$ (churchPlus $$ v "x" $$ v "y")
$$ (churchIs0 $$ v "y"
$$ v "x"
$$ (lam "fnpy"
(v "f" $$ (churchPred $$ v "n")
$$ v "fnpy"
$$ (churchPlus $$ v "fnpy" $$ v "y")
)
$$ (v "f" $$ v "n" $$ v "x" $$ (churchPred $$ v "y"))
)
)
))
churchAckermann :: Term
churchAckermann = fix $$ lam "A" (lams ["m", "n"]
(churchIs0 $$ v "m"
$$ (churchS $$ v "n")
$$ (churchIs0 $$ v "n"
$$ (v "A" $$ (churchPred $$ v "m") $$ church1)
$$ (v "A" $$ (churchPred $$ v "m")
$$ (v "A" $$ v "m" $$ (churchPred $$ v "n")))
)
)
)
|
93bebb8cc504ee9b5bcbfcb6ffd11bd36d9c485c956e1b96fd56e7016eebe961 | pirapira/coq2rust | logic.ml | (************************************************************************)
v * The Coq Proof Assistant / The Coq Development Team
< O _ _ _ , , * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999 - 2012
\VV/ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
open Pp
open Errors
open Util
open Names
open Nameops
open Term
open Vars
open Context
open Termops
open Environ
open Reductionops
open Inductiveops
open Typing
open Proof_type
open Type_errors
open Retyping
open Misctypes
type refiner_error =
(* Errors raised by the refiner *)
| BadType of constr * constr * constr
| UnresolvedBindings of Name.t list
| CannotApply of constr * constr
| NotWellTyped of constr
| NonLinearProof of constr
| MetaInType of constr
(* Errors raised by the tactics *)
| IntroNeedsProduct
| DoesNotOccurIn of constr * Id.t
| NoSuchHyp of Id.t
exception RefinerError of refiner_error
open Pretype_errors
* FIXME : this is quite brittle . Why not accept any PretypeError ?
let is_typing_error = function
| UnexpectedType (_, _) | NotProduct _
| VarNotFound _ | TypingError _ -> true
| _ -> false
let is_unification_error = function
| CannotUnify _ | CannotUnifyLocal _| CannotGeneralize _
| NoOccurrenceFound _ | CannotUnifyBindingType _
| ActualTypeNotCoercible _ | UnifOccurCheck _
| CannotFindWellTypedAbstraction _ | WrongAbstractionType _
| UnsolvableImplicit _| AbstractionOverMeta _
| UnsatisfiableConstraints _ -> true
| _ -> false
let catchable_exception = function
| Errors.UserError _ | TypeError _
| RefinerError _ | Indrec.RecursionSchemeError _
| Nametab.GlobalizationError _
(* reduction errors *)
| Tacred.ReductionTacticError _ -> true
(* unification and typing errors *)
| PretypeError(_,_, e) -> is_unification_error e || is_typing_error e
| _ -> false
let error_no_such_hypothesis id = raise (RefinerError (NoSuchHyp id))
(* Tells if the refiner should check that the submitted rules do not
produce invalid subgoals *)
let check = ref false
let with_check = Flags.with_option check
(* [apply_to_hyp sign id f] splits [sign] into [tail::[id,_,_]::head] and
returns [tail::(f head (id,_,_) (rev tail))] *)
let apply_to_hyp sign id f =
try apply_to_hyp sign id f
with Hyp_not_found ->
if !check then error_no_such_hypothesis id
else sign
let apply_to_hyp_and_dependent_on sign id f g =
try apply_to_hyp_and_dependent_on sign id f g
with Hyp_not_found ->
if !check then error_no_such_hypothesis id
else sign
let check_typability env sigma c =
if !check then let _ = type_of env sigma c in ()
(************************************************************************)
(************************************************************************)
(* Implementation of the structural rules (moving and deleting
hypotheses around) *)
(* The Clear tactic: it scans the context for hypotheses to be removed
(instead of iterating on the list of identifier to be removed, which
forces the user to give them in order). *)
let clear_hyps env sigma ids sign cl =
let evdref = ref (Evd.create_goal_evar_defs sigma) in
let (hyps,cl) = Evarutil.clear_hyps_in_evi env evdref sign cl ids in
(hyps, cl, !evdref)
let clear_hyps2 env sigma ids sign t cl =
let evdref = ref (Evd.create_goal_evar_defs sigma) in
let (hyps,t,cl) = Evarutil.clear_hyps2_in_evi env evdref sign t cl ids in
(hyps, t, cl, !evdref)
The ClearBody tactic
(* Reordering of the context *)
(* faire le minimum d'echanges pour que l'ordre donne soit un *)
sous - ordre du resultat . Par exemple , 2 hyps non mentionnee ne sont
pas echangees . Choix : les hyps mentionnees ne peuvent qu'etre
(* reculees par rapport aux autres (faire le contraire!) *)
let mt_q = (Id.Map.empty,[])
let push_val y = function
(_,[] as q) -> q
| (m, (x,l)::q) -> (m, (x,Id.Set.add y l)::q)
let push_item x v (m,l) =
(Id.Map.add x v m, (x,Id.Set.empty)::l)
let mem_q x (m,_) = Id.Map.mem x m
let find_q x (m,q) =
let v = Id.Map.find x m in
let m' = Id.Map.remove x m in
let rec find accs acc = function
[] -> raise Not_found
| [(x',l)] ->
if Id.equal x x' then ((v,Id.Set.union accs l),(m',List.rev acc))
else raise Not_found
| (x',l as i)::((x'',l'')::q as itl) ->
if Id.equal x x' then
((v,Id.Set.union accs l),
(m',List.rev acc@(x'',Id.Set.add x (Id.Set.union l l''))::q))
else find (Id.Set.union l accs) (i::acc) itl in
find Id.Set.empty [] q
let occur_vars_in_decl env hyps d =
if Id.Set.is_empty hyps then false else
let ohyps = global_vars_set_of_decl env d in
Id.Set.exists (fun h -> Id.Set.mem h ohyps) hyps
let reorder_context env sign ord =
let ords = List.fold_right Id.Set.add ord Id.Set.empty in
if not (Int.equal (List.length ord) (Id.Set.cardinal ords)) then
error "Order list has duplicates";
let rec step ord expected ctxt_head moved_hyps ctxt_tail =
match ord with
| [] -> List.rev ctxt_tail @ ctxt_head
| top::ord' when mem_q top moved_hyps ->
let ((d,h),mh) = find_q top moved_hyps in
if occur_vars_in_decl env h d then
errorlabstrm "reorder_context"
(str "Cannot move declaration " ++ pr_id top ++ spc() ++
str "before " ++
pr_sequence pr_id
(Id.Set.elements (Id.Set.inter h
(global_vars_set_of_decl env d))));
step ord' expected ctxt_head mh (d::ctxt_tail)
| _ ->
(match ctxt_head with
| [] -> error_no_such_hypothesis (List.hd ord)
| (x,_,_ as d) :: ctxt ->
if Id.Set.mem x expected then
step ord (Id.Set.remove x expected)
ctxt (push_item x d moved_hyps) ctxt_tail
else
step ord expected
ctxt (push_val x moved_hyps) (d::ctxt_tail)) in
step ord ords sign mt_q []
let reorder_val_context env sign ord =
val_of_named_context (reorder_context env (named_context_of_val sign) ord)
let check_decl_position env sign (x,_,_ as d) =
let needed = global_vars_set_of_decl env d in
let deps = dependency_closure env (named_context_of_val sign) needed in
if Id.List.mem x deps then
error ("Cannot create self-referring hypothesis "^Id.to_string x);
x::deps
Auxiliary functions for primitive MOVE tactic
*
* [ move_hyp with_dep toleft ( left,(hfrom , typfrom),right ) hto ] moves
* hyp [ hfrom ] at location [ hto ] which belongs to the hyps on the
* left side [ left ] of the full signature if [ toleft = true ] or to the hyps
* on the right side [ right ] if [ toleft = false ] .
* If [ with_dep ] then dependent hypotheses are moved accordingly .
*
* [move_hyp with_dep toleft (left,(hfrom,typfrom),right) hto] moves
* hyp [hfrom] at location [hto] which belongs to the hyps on the
* left side [left] of the full signature if [toleft=true] or to the hyps
* on the right side [right] if [toleft=false].
* If [with_dep] then dependent hypotheses are moved accordingly. *)
let move_location_eq m1 m2 = match m1, m2 with
| MoveAfter id1, MoveAfter id2 -> Id.equal id1 id2
| MoveBefore id1, MoveBefore id2 -> Id.equal id1 id2
| MoveLast, MoveLast -> true
| MoveFirst, MoveFirst -> true
| _ -> false
let rec get_hyp_after h = function
| [] -> error_no_such_hypothesis h
| (hyp,_,_) :: right ->
if Id.equal hyp h then
match right with (id,_,_)::_ -> MoveBefore id | [] -> MoveFirst
else
get_hyp_after h right
let split_sign hfrom hto l =
let rec splitrec left toleft = function
| [] -> error_no_such_hypothesis hfrom
| (hyp,c,typ) as d :: right ->
if Id.equal hyp hfrom then
(left,right,d, toleft || move_location_eq hto MoveLast)
else
let is_toleft = match hto with
| MoveAfter h' | MoveBefore h' -> Id.equal hyp h'
| _ -> false
in
splitrec (d::left) (toleft || is_toleft)
right
in
splitrec [] false l
let hyp_of_move_location = function
| MoveAfter id -> id
| MoveBefore id -> id
| _ -> assert false
let move_hyp toleft (left,(idfrom,_,_ as declfrom),right) hto =
let env = Global.env() in
let test_dep (hyp,c,typ as d) (hyp2,c,typ2 as d2) =
if toleft
then occur_var_in_decl env hyp2 d
else occur_var_in_decl env hyp d2
in
let rec moverec first middle = function
| [] ->
if match hto with MoveFirst | MoveLast -> false | _ -> true then
error_no_such_hypothesis (hyp_of_move_location hto);
List.rev first @ List.rev middle
| (hyp,_,_) :: _ as right when move_location_eq hto (MoveBefore hyp) ->
List.rev first @ List.rev middle @ right
| (hyp,_,_) as d :: right ->
let (first',middle') =
if List.exists (test_dep d) middle then
if not (move_location_eq hto (MoveAfter hyp)) then
(first, d::middle)
else
errorlabstrm "move_hyp" (str "Cannot move " ++ pr_id idfrom ++
Miscprint.pr_move_location pr_id hto ++
str (if toleft then ": it occurs in " else ": it depends on ")
++ pr_id hyp ++ str ".")
else
(d::first, middle)
in
if move_location_eq hto (MoveAfter hyp) then
List.rev first' @ List.rev middle' @ right
else
moverec first' middle' right
in
if toleft then
let right =
List.fold_right push_named_context_val right empty_named_context_val in
List.fold_left (fun sign d -> push_named_context_val d sign)
right (moverec [] [declfrom] left)
else
let right =
List.fold_right push_named_context_val
(moverec [] [declfrom] right) empty_named_context_val in
List.fold_left (fun sign d -> push_named_context_val d sign)
right left
let rename_hyp id1 id2 sign =
apply_to_hyp_and_dependent_on sign id1
(fun (_,b,t) _ -> (id2,b,t))
(fun d _ -> map_named_declaration (replace_vars [id1,mkVar id2]) d)
(**********************************************************************)
(************************************************************************)
(************************************************************************)
(* Implementation of the logical rules *)
Will only be used on terms given to the Refine rule which have meta
variables only in Application and Case
variables only in Application and Case *)
let error_unsupported_deep_meta c =
errorlabstrm "" (strbrk "Application of lemmas whose beta-iota normal " ++
strbrk "form contains metavariables deep inside the term is not " ++
strbrk "supported; try \"refine\" instead.")
let collect_meta_variables c =
let rec collrec deep acc c = match kind_of_term c with
| Meta mv -> if deep then error_unsupported_deep_meta () else mv::acc
| Cast(c,_,_) -> collrec deep acc c
| (App _| Case _) -> fold_constr (collrec deep) acc c
| Proj (_, c) -> collrec deep acc c
| _ -> fold_constr (collrec true) acc c
in
List.rev (collrec false [] c)
let check_meta_variables c =
if not (List.distinct_f Int.compare (collect_meta_variables c)) then
raise (RefinerError (NonLinearProof c))
let check_conv_leq_goal env sigma arg ty conclty =
if !check then
let evm, b = Reductionops.infer_conv env sigma ty conclty in
if b then evm
else raise (RefinerError (BadType (arg,ty,conclty)))
else sigma
exception Stop of constr list
let meta_free_prefix a =
try
let _ = Array.fold_left (fun acc a ->
if occur_meta a then raise (Stop acc)
else a :: acc) [] a
in a
with Stop acc -> Array.rev_of_list acc
let goal_type_of env sigma c =
if !check then type_of env sigma c
else Retyping.get_type_of env sigma c
let rec mk_refgoals sigma goal goalacc conclty trm =
let env = Goal.V82.env sigma goal in
let hyps = Goal.V82.hyps sigma goal in
let mk_goal hyps concl =
Goal.V82.mk_goal sigma hyps concl (Goal.V82.extra sigma goal)
in
if (not !check) && not (occur_meta trm) then
let t'ty = Retyping.get_type_of env sigma trm in
let sigma = check_conv_leq_goal env sigma trm t'ty conclty in
(goalacc,t'ty,sigma,trm)
else
match kind_of_term trm with
| Meta _ ->
let conclty = nf_betaiota sigma conclty in
if !check && occur_meta conclty then
raise (RefinerError (MetaInType conclty));
let (gl,ev,sigma) = mk_goal hyps conclty in
gl::goalacc, conclty, sigma, ev
| Cast (t,k, ty) ->
check_typability env sigma ty;
let sigma = check_conv_leq_goal env sigma trm ty conclty in
let res = mk_refgoals sigma goal goalacc ty t in
* we keep the casts ( in particular VMcast and NATIVEcast ) except
when they are annotating metas
when they are annotating metas *)
if isMeta t then begin
assert (k != VMcast && k != NATIVEcast);
res
end else
let (gls,cty,sigma,ans) = res in
let ans = if ans == t then trm else mkCast(ans,k,ty) in
(gls,cty,sigma,ans)
| App (f,l) ->
let (acc',hdty,sigma,applicand) =
if is_template_polymorphic env f then
let sigma, ty =
(* Template sort-polymorphism of definition and inductive types *)
type_of_global_reference_knowing_conclusion env sigma f conclty
in
goalacc, ty, sigma, f
else
mk_hdgoals sigma goal goalacc f
in
let ((acc'',conclty',sigma), args) = mk_arggoals sigma goal acc' hdty l in
let sigma = check_conv_leq_goal env sigma trm conclty' conclty in
let ans = if applicand == f && args == l then trm else Term.mkApp (applicand, args) in
(acc'',conclty',sigma, ans)
| Proj (p,c) ->
let (acc',cty,sigma,c') = mk_hdgoals sigma goal goalacc c in
let c = mkProj (p, c') in
let ty = get_type_of env sigma c in
(acc',ty,sigma,c)
| Case (ci,p,c,lf) ->
let (acc',lbrty,conclty',sigma,p',c') = mk_casegoals sigma goal goalacc p c in
let sigma = check_conv_leq_goal env sigma trm conclty' conclty in
let (acc'',sigma, rbranches) =
Array.fold_left2
(fun (lacc,sigma,bacc) ty fi ->
let (r,_,s,b') = mk_refgoals sigma goal lacc ty fi in r,s,(b'::bacc))
(acc',sigma,[]) lbrty lf
in
let lf' = Array.rev_of_list rbranches in
let ans =
if p' == p && c' == c && Array.equal (==) lf' lf then trm
else Term.mkCase (ci,p',c',lf')
in
(acc'',conclty',sigma, ans)
| _ ->
if occur_meta trm then
anomaly (Pp.str "refiner called with a meta in non app/case subterm");
let t'ty = goal_type_of env sigma trm in
let sigma = check_conv_leq_goal env sigma trm t'ty conclty in
(goalacc,t'ty,sigma, trm)
Same as mkREFGOALS but without knowing the type of the term . Therefore ,
* Metas should be casted .
* Metas should be casted. *)
and mk_hdgoals sigma goal goalacc trm =
let env = Goal.V82.env sigma goal in
let hyps = Goal.V82.hyps sigma goal in
let mk_goal hyps concl =
Goal.V82.mk_goal sigma hyps concl (Goal.V82.extra sigma goal) in
match kind_of_term trm with
| Cast (c,_, ty) when isMeta c ->
check_typability env sigma ty;
let (gl,ev,sigma) = mk_goal hyps (nf_betaiota sigma ty) in
gl::goalacc,ty,sigma,ev
| Cast (t,_, ty) ->
check_typability env sigma ty;
mk_refgoals sigma goal goalacc ty t
| App (f,l) ->
let (acc',hdty,sigma,applicand) =
if is_template_polymorphic env f
then
let l' = meta_free_prefix l in
(goalacc,type_of_global_reference_knowing_parameters env sigma f l',sigma,f)
else mk_hdgoals sigma goal goalacc f
in
let ((acc'',conclty',sigma), args) = mk_arggoals sigma goal acc' hdty l in
let ans = if applicand == f && args == l then trm else Term.mkApp (applicand, args) in
(acc'',conclty',sigma, ans)
| Case (ci,p,c,lf) ->
let (acc',lbrty,conclty',sigma,p',c') = mk_casegoals sigma goal goalacc p c in
let (acc'',sigma,rbranches) =
Array.fold_left2
(fun (lacc,sigma,bacc) ty fi ->
let (r,_,s,b') = mk_refgoals sigma goal lacc ty fi in r,s,(b'::bacc))
(acc',sigma,[]) lbrty lf
in
let lf' = Array.rev_of_list rbranches in
let ans =
if p' == p && c' == c && Array.equal (==) lf' lf then trm
else Term.mkCase (ci,p',c',lf')
in
(acc'',conclty',sigma, ans)
| Proj (p,c) ->
let (acc',cty,sigma,c') = mk_hdgoals sigma goal goalacc c in
let c = mkProj (p, c') in
let ty = get_type_of env sigma c in
(acc',ty,sigma,c)
| _ ->
if !check && occur_meta trm then
anomaly (Pp.str "refine called with a dependent meta");
goalacc, goal_type_of env sigma trm, sigma, trm
and mk_arggoals sigma goal goalacc funty allargs =
let foldmap (goalacc, funty, sigma) harg =
let t = whd_betadeltaiota (Goal.V82.env sigma goal) sigma funty in
let rec collapse t = match kind_of_term t with
| LetIn (_, c1, _, b) -> collapse (subst1 c1 b)
| _ -> t
in
let t = collapse t in
match kind_of_term t with
| Prod (_, c1, b) ->
let (acc, hargty, sigma, arg) = mk_refgoals sigma goal goalacc c1 harg in
(acc, subst1 harg b, sigma), arg
| _ -> raise (RefinerError (CannotApply (t, harg)))
in
Array.smartfoldmap foldmap (goalacc, funty, sigma) allargs
and mk_casegoals sigma goal goalacc p c =
let env = Goal.V82.env sigma goal in
let (acc',ct,sigma,c') = mk_hdgoals sigma goal goalacc c in
let (acc'',pt,sigma,p') = mk_hdgoals sigma goal acc' p in
let indspec =
try Tacred.find_hnf_rectype env sigma ct
with Not_found -> anomaly (Pp.str "mk_casegoals") in
let (lbrty,conclty) = type_case_branches_with_names env indspec p c in
(acc'',lbrty,conclty,sigma,p',c')
(* [convert_hyp check sign sigma (id,b,bt)] replaces the declaration of
   [id] in [sign] by [(id,b,bt)].  When [check] is set:
   - the new type [bt] must be convertible to the stored one,
   - the new optional body [b] must be convertible to the stored one,
   - the declarations [d] depends on are recomputed and the resulting
     context is reordered so the dependencies come first. *)
let convert_hyp check sign sigma (id,b,bt as d) =
  let env = Global.env() in
  let reorder = ref [] in
  let sign' =
    apply_to_hyp sign id
      (fun _ (_,c,ct) _ ->
        let env = Global.env_of_context sign in
        (* conversion checks compare against the previously stored type/body *)
        if check && not (is_conv env sigma bt ct) then
          error ("Incorrect change of the type of "^(Id.to_string id)^".");
        if check && not (Option.equal (is_conv env sigma) b c) then
          error ("Incorrect change of the body of "^(Id.to_string id)^".");
        if check then reorder := check_decl_position env sign d;
        d) in
  reorder_val_context env sign' !reorder
(************************************************************************)
(************************************************************************)
(* Primitive tactics are handled here *)
let prim_refiner r sigma goal =
let env = Goal.V82.env sigma goal in
let sign = Goal.V82.hyps sigma goal in
let cl = Goal.V82.concl sigma goal in
let mk_goal hyps concl =
Goal.V82.mk_goal sigma hyps concl (Goal.V82.extra sigma goal)
in
match r with
(* Logical rules *)
| Cut (b,replace,id,t) ->
(* if !check && not (Retyping.get_sort_of env sigma t) then*)
let (sg1,ev1,sigma) = mk_goal sign (nf_betaiota sigma t) in
let sign,t,cl,sigma =
if replace then
let nexthyp = get_hyp_after id (named_context_of_val sign) in
let sign,t,cl,sigma = clear_hyps2 env sigma (Id.Set.singleton id) sign t cl in
move_hyp false ([],(id,None,t),named_context_of_val sign)
nexthyp,
t,cl,sigma
else
(if !check && mem_named_context id (named_context_of_val sign) then
error ("Variable " ^ Id.to_string id ^ " is already declared.");
push_named_context_val (id,None,t) sign,t,cl,sigma) in
let (sg2,ev2,sigma) =
Goal.V82.mk_goal sigma sign cl (Goal.V82.extra sigma goal) in
let oterm = Term.mkApp (mkNamedLambda id t ev2 , [| ev1 |]) in
let sigma = Goal.V82.partial_solution_to sigma goal sg2 oterm in
if b then ([sg1;sg2],sigma) else ([sg2;sg1],sigma)
| FixRule (f,n,rest,j) ->
let rec check_ind env k cl =
match kind_of_term (strip_outer_cast cl) with
| Prod (na,c1,b) ->
if Int.equal k 1 then
try
fst (find_inductive env sigma c1)
with Not_found ->
error "Cannot do a fixpoint on a non inductive type."
else
check_ind (push_rel (na,None,c1) env) (k-1) b
| _ -> error "Not enough products."
in
let ((sp,_),u) = check_ind env n cl in
let firsts,lasts = List.chop j rest in
let all = firsts@(f,n,cl)::lasts in
let rec mk_sign sign = function
| (f,n,ar)::oth ->
let ((sp',_),u') = check_ind env n ar in
if not (eq_mind sp sp') then
error ("Fixpoints should be on the same " ^
"mutual inductive declaration.");
if !check && mem_named_context f (named_context_of_val sign) then
error
("Name "^Id.to_string f^" already used in the environment");
mk_sign (push_named_context_val (f,None,ar) sign) oth
| [] ->
Evd.Monad.List.map (fun (_,_,c) sigma ->
let gl,ev,sig' =
Goal.V82.mk_goal sigma sign c (Goal.V82.extra sigma goal) in
(gl,ev),sig')
all sigma
in
let (gls_evs,sigma) = mk_sign sign all in
let (gls,evs) = List.split gls_evs in
let ids = List.map pi1 all in
let evs = List.map (Vars.subst_vars (List.rev ids)) evs in
let indxs = Array.of_list (List.map (fun n -> n-1) (List.map pi2 all)) in
let funnames = Array.of_list (List.map (fun i -> Name i) ids) in
let typarray = Array.of_list (List.map pi3 all) in
let bodies = Array.of_list evs in
let oterm = Term.mkFix ((indxs,0),(funnames,typarray,bodies)) in
let sigma = Goal.V82.partial_solution sigma goal oterm in
(gls,sigma)
| Cofix (f,others,j) ->
let rec check_is_coind env cl =
let b = whd_betadeltaiota env sigma cl in
match kind_of_term b with
| Prod (na,c1,b) -> check_is_coind (push_rel (na,None,c1) env) b
| _ ->
try
let _ = find_coinductive env sigma b in ()
with Not_found ->
error ("All methods must construct elements " ^
"in coinductive types.")
in
let firsts,lasts = List.chop j others in
let all = firsts@(f,cl)::lasts in
List.iter (fun (_,c) -> check_is_coind env c) all;
let rec mk_sign sign = function
| (f,ar)::oth ->
(try
(let _ = lookup_named_val f sign in
error "Name already used in the environment.")
with
| Not_found ->
mk_sign (push_named_context_val (f,None,ar) sign) oth)
| [] ->
Evd.Monad.List.map (fun (_,c) sigma ->
let gl,ev,sigma =
Goal.V82.mk_goal sigma sign c (Goal.V82.extra sigma goal) in
(gl,ev),sigma)
all sigma
in
let (gls_evs,sigma) = mk_sign sign all in
let (gls,evs) = List.split gls_evs in
let (ids,types) = List.split all in
let evs = List.map (Vars.subst_vars (List.rev ids)) evs in
let funnames = Array.of_list (List.map (fun i -> Name i) ids) in
let typarray = Array.of_list types in
let bodies = Array.of_list evs in
let oterm = Term.mkCoFix (0,(funnames,typarray,bodies)) in
let sigma = Goal.V82.partial_solution sigma goal oterm in
(gls,sigma)
| Refine c ->
check_meta_variables c;
let (sgl,cl',sigma,oterm) = mk_refgoals sigma goal [] cl c in
let sgl = List.rev sgl in
let sigma = Goal.V82.partial_solution sigma goal oterm in
(sgl, sigma)
(* And now the structural rules *)
| Thin ids ->
let ids = List.fold_left (fun accu x -> Id.Set.add x accu) Id.Set.empty ids in
let (hyps,concl,nsigma) = clear_hyps env sigma ids sign cl in
let (gl,ev,sigma) =
Goal.V82.mk_goal nsigma hyps concl (Goal.V82.extra nsigma goal)
in
let sigma = Goal.V82.partial_solution_to sigma goal gl ev in
([gl], sigma)
| Move (hfrom, hto) ->
let (left,right,declfrom,toleft) =
split_sign hfrom hto (named_context_of_val sign) in
let hyps' =
move_hyp toleft (left,declfrom,right) hto in
let (gl,ev,sigma) = mk_goal hyps' cl in
let sigma = Goal.V82.partial_solution_to sigma goal gl ev in
([gl], sigma)
| null | https://raw.githubusercontent.com/pirapira/coq2rust/22e8aaefc723bfb324ca2001b2b8e51fcc923543/proofs/logic.ml | ocaml | **********************************************************************
// * This file is distributed under the terms of the
* GNU Lesser General Public License Version 2.1
**********************************************************************
Errors raised by the refiner
Errors raised by the tactics
reduction errors
unification and typing errors
Tells if the refiner should check that the submitted rules do not
produce invalid subgoals
[apply_to_hyp sign id f] splits [sign] into [tail::[id,_,_]::head] and
returns [tail::(f head (id,_,_) (rev tail))]
**********************************************************************
**********************************************************************
Implementation of the structural rules (moving and deleting
hypotheses around)
The Clear tactic: it scans the context for hypotheses to be removed
(instead of iterating on the list of identifier to be removed, which
forces the user to give them in order).
Reordering of the context
faire le minimum d'echanges pour que l'ordre donne soit un
reculees par rapport aux autres (faire le contraire!)
********************************************************************
**********************************************************************
**********************************************************************
Implementation of the logical rules
Template sort-polymorphism of definition and inductive types
**********************************************************************
**********************************************************************
Primitive tactics are handled here
Logical rules
if !check && not (Retyping.get_sort_of env sigma t) then
And now the structural rules | v * The Coq Proof Assistant / The Coq Development Team
< O _ _ _ , , * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999 - 2012
\VV/ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
open Pp
open Errors
open Util
open Names
open Nameops
open Term
open Vars
open Context
open Termops
open Environ
open Reductionops
open Inductiveops
open Typing
open Proof_type
open Type_errors
open Retyping
open Misctypes
(* Errors raised by the refiner while building or checking partial
   proof terms. *)
type refiner_error =
  (* term, its inferred type, expected type (raised by the conversion check) *)
  | BadType of constr * constr * constr
  (* bindings left uninstantiated *)
  | UnresolvedBindings of Name.t list
  (* head (non-product) type, offending argument (see [mk_arggoals]) *)
  | CannotApply of constr * constr
  | NotWellTyped of constr
  (* the same meta occurs twice in a refine term (see [check_meta_variables]) *)
  | NonLinearProof of constr
  (* a meta occurs in the type of a goal to create (see [mk_refgoals]) *)
  | MetaInType of constr
  | IntroNeedsProduct
  | DoesNotOccurIn of constr * Id.t
  (* hypothesis not found in the current signature *)
  | NoSuchHyp of Id.t

exception RefinerError of refiner_error
open Pretype_errors
(* FIXME: this is quite brittle. Why not accept any PretypeError? *)
(* Recognize the pretyping errors that stem from typing proper. *)
let is_typing_error err =
  match err with
  | UnexpectedType (_, _) -> true
  | NotProduct _ -> true
  | VarNotFound _ -> true
  | TypingError _ -> true
  | _ -> false
(* Recognize the pretyping errors that stem from unification. *)
let is_unification_error err =
  match err with
  | CannotUnify _ -> true
  | CannotUnifyLocal _ -> true
  | CannotGeneralize _ -> true
  | NoOccurrenceFound _ -> true
  | CannotUnifyBindingType _ -> true
  | ActualTypeNotCoercible _ -> true
  | UnifOccurCheck _ -> true
  | CannotFindWellTypedAbstraction _ -> true
  | WrongAbstractionType _ -> true
  | UnsolvableImplicit _ -> true
  | AbstractionOverMeta _ -> true
  | UnsatisfiableConstraints _ -> true
  | _ -> false
(* Predicate deciding whether an exception is a user-level error that
   tactic code may catch (as opposed to an anomaly).  A [PretypeError]
   is only catchable when it is a unification or typing failure. *)
let catchable_exception = function
  | Errors.UserError _ | TypeError _
  | RefinerError _ | Indrec.RecursionSchemeError _
  | Nametab.GlobalizationError _
  | Tacred.ReductionTacticError _ -> true
  | PretypeError(_,_, e) -> is_unification_error e || is_typing_error e
  | _ -> false
(* Report a missing hypothesis via the refiner's own error type. *)
let error_no_such_hypothesis id = raise (RefinerError (NoSuchHyp id))

(* Tells if the refiner should check that the submitted rules do not
   produce invalid subgoals (extra typing/conversion checks below are
   guarded by [!check]). *)
let check = ref false
let with_check = Flags.with_option check

(* [apply_to_hyp sign id f] applies [f] to the declaration of [id] in
   [sign]; when [id] is absent it fails if [!check] is set and returns
   [sign] unchanged otherwise. *)
let apply_to_hyp sign id f =
  try apply_to_hyp sign id f
  with Hyp_not_found ->
    if !check then error_no_such_hypothesis id
    else sign

(* Same missing-hypothesis tolerance as [apply_to_hyp], for the variant
   that also rewrites the declarations depending on [id] with [g]. *)
let apply_to_hyp_and_dependent_on sign id f g =
  try apply_to_hyp_and_dependent_on sign id f g
  with Hyp_not_found ->
    if !check then error_no_such_hypothesis id
    else sign

(* Type-check [c] for its side effect only, when checking is enabled. *)
let check_typability env sigma c =
  if !check then let _ = type_of env sigma c in ()

(* Remove hypotheses [ids] from [sign], updating the conclusion [cl]
   and the evar map accordingly. *)
let clear_hyps env sigma ids sign cl =
  let evdref = ref (Evd.create_goal_evar_defs sigma) in
  let (hyps,cl) = Evarutil.clear_hyps_in_evi env evdref sign cl ids in
  (hyps, cl, !evdref)

(* As [clear_hyps], but additionally updates an extra term [t]. *)
let clear_hyps2 env sigma ids sign t cl =
  let evdref = ref (Evd.create_goal_evar_defs sigma) in
  let (hyps,t,cl) = Evarutil.clear_hyps2_in_evi env evdref sign t cl ids in
  (hyps, t, cl, !evdref)
(* The ClearBody tactic *)

(* Reordering of the context: perform the minimum number of exchanges
   so that the given order is a sub-order of the result.  For instance,
   two unmentioned hypotheses are not exchanged.  Choice: the mentioned
   hypotheses may only be moved backwards relative to the others. *)
(* Working state for context reordering: a map from moved-hypothesis
   names to their declarations, paired with a queue recording, for each
   moved hypothesis, the set of names encountered after it was pushed. *)
let mt_q = (Id.Map.empty,[])

(* Record that [y] was seen after the most recently pushed hypothesis. *)
let push_val y = function
    (_,[] as q) -> q
  | (m, (x,l)::q) -> (m, (x,Id.Set.add y l)::q)

(* Push hypothesis [x] with declaration [v] onto the queue. *)
let push_item x v (m,l) =
  (Id.Map.add x v m, (x,Id.Set.empty)::l)

let mem_q x (m,_) = Id.Map.mem x m

(* Extract the entry for [x]: return its declaration together with the
   union of the name sets accumulated up to it, and the remaining queue
   with [x]'s set merged into its successor's entry. *)
let find_q x (m,q) =
  let v = Id.Map.find x m in
  let m' = Id.Map.remove x m in
  let rec find accs acc = function
      [] -> raise Not_found
    | [(x',l)] ->
        if Id.equal x x' then ((v,Id.Set.union accs l),(m',List.rev acc))
        else raise Not_found
    | (x',l as i)::((x'',l'')::q as itl) ->
        if Id.equal x x' then
          ((v,Id.Set.union accs l),
           (m',List.rev acc@(x'',Id.Set.add x (Id.Set.union l l''))::q))
        else find (Id.Set.union l accs) (i::acc) itl in
  find Id.Set.empty [] q
(* Does declaration [d] mention any of the variables in [hyps]?  The
   emptiness test short-circuits the (costlier) free-variable scan. *)
let occur_vars_in_decl env hyps d =
  not (Id.Set.is_empty hyps)
  && (let frees = global_vars_set_of_decl env d in
      Id.Set.exists (fun h -> Id.Set.mem h frees) hyps)
(* [reorder_context env sign ord] reorders the declarations of [sign]
   so that the hypotheses listed in [ord] appear in that relative
   order.  Fails if [ord] contains duplicates, mentions an unknown
   hypothesis, or if moving a declaration would place it before one of
   its dependencies. *)
let reorder_context env sign ord =
  let ords = List.fold_right Id.Set.add ord Id.Set.empty in
  if not (Int.equal (List.length ord) (Id.Set.cardinal ords)) then
    error "Order list has duplicates";
  (* [step] walks [ctxt_head], buffering in [moved_hyps] the mentioned
     hypotheses until the position dictated by [ord] is reached;
     [ctxt_tail] accumulates the emitted (reversed) prefix. *)
  let rec step ord expected ctxt_head moved_hyps ctxt_tail =
    match ord with
    | [] -> List.rev ctxt_tail @ ctxt_head
    | top::ord' when mem_q top moved_hyps ->
        let ((d,h),mh) = find_q top moved_hyps in
        (* [h] is the set of names seen since [top] was buffered:
           [d] must not depend on any of them *)
        if occur_vars_in_decl env h d then
          errorlabstrm "reorder_context"
            (str "Cannot move declaration " ++ pr_id top ++ spc() ++
             str "before " ++
             pr_sequence pr_id
               (Id.Set.elements (Id.Set.inter h
                 (global_vars_set_of_decl env d))));
        step ord' expected ctxt_head mh (d::ctxt_tail)
    | _ ->
        (match ctxt_head with
        | [] -> error_no_such_hypothesis (List.hd ord)
        | (x,_,_ as d) :: ctxt ->
            if Id.Set.mem x expected then
              (* mentioned hypothesis: buffer it until its turn comes *)
              step ord (Id.Set.remove x expected)
                ctxt (push_item x d moved_hyps) ctxt_tail
            else
              (* unmentioned hypothesis: emit it in place *)
              step ord expected
                ctxt (push_val x moved_hyps) (d::ctxt_tail)) in
  step ord ords sign mt_q []

(* Same as [reorder_context], on a context value. *)
let reorder_val_context env sign ord =
  val_of_named_context (reorder_context env (named_context_of_val sign) ord)

(* Dependency closure needed for (re)inserting declaration [d];
   fails if [x] would refer to itself. *)
let check_decl_position env sign (x,_,_ as d) =
  let needed = global_vars_set_of_decl env d in
  let deps = dependency_closure env (named_context_of_val sign) needed in
  if Id.List.mem x deps then
    error ("Cannot create self-referring hypothesis "^Id.to_string x);
  x::deps
(* Auxiliary functions for primitive MOVE tactic
 *
 * [move_hyp with_dep toleft (left,(hfrom,typfrom),right) hto] moves
 * hyp [hfrom] at location [hto] which belongs to the hyps on the
 * left side [left] of the full signature if [toleft=true] or to the hyps
 * on the right side [right] if [toleft=false].
 * If [with_dep] then dependent hypotheses are moved accordingly. *)
(* Structural equality on move locations. *)
let move_location_eq lhs rhs =
  match lhs, rhs with
  | MoveAfter i, MoveAfter j
  | MoveBefore i, MoveBefore j -> Id.equal i j
  | MoveLast, MoveLast
  | MoveFirst, MoveFirst -> true
  | _, _ -> false
(* Location just after hypothesis [h] in the context list: [MoveBefore]
   its successor, or [MoveFirst] when [h] has no successor. *)
let rec get_hyp_after h = function
  | [] -> error_no_such_hypothesis h
  | (hyp,_,_) :: right ->
      if Id.equal hyp h then
        match right with (id,_,_)::_ -> MoveBefore id | [] -> MoveFirst
      else
        get_hyp_after h right

(* Split the context list around [hfrom]: returns the declarations
   before it (in reverse), those after it, the declaration itself, and
   whether the target location [hto] was encountered on the "left"
   side of [hfrom] (MoveLast always counts as left). *)
let split_sign hfrom hto l =
  let rec splitrec left toleft = function
    | [] -> error_no_such_hypothesis hfrom
    | (hyp,c,typ) as d :: right ->
        if Id.equal hyp hfrom then
          (left,right,d, toleft || move_location_eq hto MoveLast)
        else
          let is_toleft = match hto with
            | MoveAfter h' | MoveBefore h' -> Id.equal hyp h'
            | _ -> false
          in
          splitrec (d::left) (toleft || is_toleft)
            right
  in
  splitrec [] false l
(* The hypothesis a relative move location refers to; absolute
   locations (MoveFirst/MoveLast) carry none and are a caller bug. *)
let hyp_of_move_location loc =
  match loc with
  | MoveAfter id | MoveBefore id -> id
  | _ -> assert false
(* Move declaration [declfrom] to location [hto]; [left]/[right] are
   the parts of the signature on each side of it and [toleft] says on
   which side [hto] lies (see [split_sign]).  Declarations that depend
   on the moved one travel with it ([middle]); a dependency that cannot
   be carried across raises an error. *)
let move_hyp toleft (left,(idfrom,_,_ as declfrom),right) hto =
  let env = Global.env() in
  (* dependency test is oriented by the direction of the move *)
  let test_dep (hyp,c,typ as d) (hyp2,c,typ2 as d2) =
    if toleft
    then occur_var_in_decl env hyp2 d
    else occur_var_in_decl env hyp d2
  in
  (* [first]: declarations passed over; [middle]: the moved declaration
     plus everything dragged along with it (both in reverse order) *)
  let rec moverec first middle = function
    | [] ->
        if match hto with MoveFirst | MoveLast -> false | _ -> true then
          error_no_such_hypothesis (hyp_of_move_location hto);
        List.rev first @ List.rev middle
    | (hyp,_,_) :: _ as right when move_location_eq hto (MoveBefore hyp) ->
        List.rev first @ List.rev middle @ right
    | (hyp,_,_) as d :: right ->
        let (first',middle') =
          if List.exists (test_dep d) middle then
            if not (move_location_eq hto (MoveAfter hyp)) then
              (* dependent declaration: drag it along *)
              (first, d::middle)
            else
              errorlabstrm "move_hyp" (str "Cannot move " ++ pr_id idfrom ++
                Miscprint.pr_move_location pr_id hto ++
                str (if toleft then ": it occurs in " else ": it depends on ")
                ++ pr_id hyp ++ str ".")
          else
            (d::first, middle)
        in
        if move_location_eq hto (MoveAfter hyp) then
          List.rev first' @ List.rev middle' @ right
        else
          moverec first' middle' right
  in
  if toleft then
    let right =
      List.fold_right push_named_context_val right empty_named_context_val in
    List.fold_left (fun sign d -> push_named_context_val d sign)
      right (moverec [] [declfrom] left)
  else
    let right =
      List.fold_right push_named_context_val
        (moverec [] [declfrom] right) empty_named_context_val in
    List.fold_left (fun sign d -> push_named_context_val d sign)
      right left

(* Rename hypothesis [id1] into [id2], substituting [id2] for [id1] in
   every declaration that depends on it. *)
let rename_hyp id1 id2 sign =
  apply_to_hyp_and_dependent_on sign id1
    (fun (_,b,t) _ -> (id2,b,t))
    (fun d _ -> map_named_declaration (replace_vars [id1,mkVar id2]) d)
(* Will only be used on terms given to the Refine rule which have meta
   variables only in Application and Case *)
(* User error for metas occurring too deep for the v8.2 refiner. *)
let error_unsupported_deep_meta c =
  errorlabstrm "" (strbrk "Application of lemmas whose beta-iota normal " ++
    strbrk "form contains metavariables deep inside the term is not " ++
    strbrk "supported; try \"refine\" instead.")

(* Collect, in order, the metas of [c].  Metas are only supported under
   casts, applications, projections and case constructs; once any other
   node is crossed ([deep] becomes true) a meta is an error. *)
let collect_meta_variables c =
  let rec collrec deep acc c = match kind_of_term c with
    | Meta mv -> if deep then error_unsupported_deep_meta () else mv::acc
    | Cast(c,_,_) -> collrec deep acc c
    | (App _| Case _) -> fold_constr (collrec deep) acc c
    | Proj (_, c) -> collrec deep acc c
    | _ -> fold_constr (collrec true) acc c
  in
  List.rev (collrec false [] c)

(* Refine terms must be linear in their metas: fail if one occurs twice. *)
let check_meta_variables c =
  if not (List.distinct_f Int.compare (collect_meta_variables c)) then
    raise (RefinerError (NonLinearProof c))

(* When checking is on, ensure [ty] is convertible-below [conclty];
   returns the possibly-updated evar map or raises [BadType]. *)
let check_conv_leq_goal env sigma arg ty conclty =
  if !check then
    let evm, b = Reductionops.infer_conv env sigma ty conclty in
    if b then evm
    else raise (RefinerError (BadType (arg,ty,conclty)))
  else sigma

exception Stop of constr list

(* Longest meta-free prefix of argument array [a] ([a] itself when no
   meta occurs). *)
let meta_free_prefix a =
  try
    let _ = Array.fold_left (fun acc a ->
      if occur_meta a then raise (Stop acc)
      else a :: acc) [] a
    in a
  with Stop acc -> Array.rev_of_list acc

(* Type of [c]: full type-checking when [!check], fast retyping otherwise. *)
let goal_type_of env sigma c =
  if !check then type_of env sigma c
  else Retyping.get_type_of env sigma c
(* [mk_refgoals sigma goal goalacc conclty trm] checks [trm] against
   the expected type [conclty] of [goal] and turns each meta of [trm]
   into a fresh subgoal.  Returns [(goals, ty, sigma, trm')] where
   [goals] extends [goalacc] with the new subgoals, [ty] is the checked
   type of [trm], and [trm'] is [trm] with metas replaced by the
   corresponding evars (shared with [trm] when nothing changed). *)
let rec mk_refgoals sigma goal goalacc conclty trm =
  let env = Goal.V82.env sigma goal in
  let hyps = Goal.V82.hyps sigma goal in
  let mk_goal hyps concl =
    Goal.V82.mk_goal sigma hyps concl (Goal.V82.extra sigma goal)
  in
  (* fast path: no meta and no checking requested, just retype *)
  if (not !check) && not (occur_meta trm) then
    let t'ty = Retyping.get_type_of env sigma trm in
    let sigma = check_conv_leq_goal env sigma trm t'ty conclty in
    (goalacc,t'ty,sigma,trm)
  else
    match kind_of_term trm with
    | Meta _ ->
        (* a meta becomes a new subgoal whose statement is [conclty] *)
        let conclty = nf_betaiota sigma conclty in
        if !check && occur_meta conclty then
          raise (RefinerError (MetaInType conclty));
        let (gl,ev,sigma) = mk_goal hyps conclty in
        gl::goalacc, conclty, sigma, ev

    | Cast (t,k, ty) ->
        check_typability env sigma ty;
        let sigma = check_conv_leq_goal env sigma trm ty conclty in
        let res = mk_refgoals sigma goal goalacc ty t in
        (* we keep the casts (in particular VMcast and NATIVEcast) except
           when they are annotating metas *)
        if isMeta t then begin
          assert (k != VMcast && k != NATIVEcast);
          res
        end else
          let (gls,cty,sigma,ans) = res in
          let ans = if ans == t then trm else mkCast(ans,k,ty) in
          (gls,cty,sigma,ans)

    | App (f,l) ->
        let (acc',hdty,sigma,applicand) =
          if is_template_polymorphic env f then
            let sigma, ty =
              (* Template sort-polymorphism of definition and inductive types *)
              type_of_global_reference_knowing_conclusion env sigma f conclty
            in
            goalacc, ty, sigma, f
          else
            mk_hdgoals sigma goal goalacc f
        in
        let ((acc'',conclty',sigma), args) = mk_arggoals sigma goal acc' hdty l in
        let sigma = check_conv_leq_goal env sigma trm conclty' conclty in
        (* preserve sharing when neither head nor arguments changed *)
        let ans = if applicand == f && args == l then trm else Term.mkApp (applicand, args) in
        (acc'',conclty',sigma, ans)

    | Proj (p,c) ->
        let (acc',cty,sigma,c') = mk_hdgoals sigma goal goalacc c in
        let c = mkProj (p, c') in
        let ty = get_type_of env sigma c in
        (acc',ty,sigma,c)

    | Case (ci,p,c,lf) ->
        let (acc',lbrty,conclty',sigma,p',c') = mk_casegoals sigma goal goalacc p c in
        let sigma = check_conv_leq_goal env sigma trm conclty' conclty in
        (* recurse on each branch against its expected type [lbrty.(i)] *)
        let (acc'',sigma, rbranches) =
          Array.fold_left2
            (fun (lacc,sigma,bacc) ty fi ->
               let (r,_,s,b') = mk_refgoals sigma goal lacc ty fi in r,s,(b'::bacc))
            (acc',sigma,[]) lbrty lf
        in
        let lf' = Array.rev_of_list rbranches in
        let ans =
          if p' == p && c' == c && Array.equal (==) lf' lf then trm
          else Term.mkCase (ci,p',c',lf')
        in
        (acc'',conclty',sigma, ans)

    | _ ->
        (* metas are only supported in the head positions handled above *)
        if occur_meta trm then
          anomaly (Pp.str "refiner called with a meta in non app/case subterm");
        let t'ty = goal_type_of env sigma trm in
        let sigma = check_conv_leq_goal env sigma trm t'ty conclty in
        (goalacc,t'ty,sigma, trm)

(* Same as mkREFGOALS but without knowing the type of the term. Therefore,
 * Metas should be casted. *)
and mk_hdgoals sigma goal goalacc trm =
  let env = Goal.V82.env sigma goal in
  let hyps = Goal.V82.hyps sigma goal in
  let mk_goal hyps concl =
    Goal.V82.mk_goal sigma hyps concl (Goal.V82.extra sigma goal) in
  match kind_of_term trm with
    | Cast (c,_, ty) when isMeta c ->
        (* a casted meta: the cast supplies the type of the new subgoal *)
        check_typability env sigma ty;
        let (gl,ev,sigma) = mk_goal hyps (nf_betaiota sigma ty) in
        gl::goalacc,ty,sigma,ev

    | Cast (t,_, ty) ->
        check_typability env sigma ty;
        mk_refgoals sigma goal goalacc ty t

    | App (f,l) ->
        let (acc',hdty,sigma,applicand) =
          if is_template_polymorphic env f
          then
            (* only the meta-free prefix can inform template instantiation *)
            let l' = meta_free_prefix l in
            (goalacc,type_of_global_reference_knowing_parameters env sigma f l',sigma,f)
          else mk_hdgoals sigma goal goalacc f
        in
        let ((acc'',conclty',sigma), args) = mk_arggoals sigma goal acc' hdty l in
        let ans = if applicand == f && args == l then trm else Term.mkApp (applicand, args) in
        (acc'',conclty',sigma, ans)

    | Case (ci,p,c,lf) ->
        let (acc',lbrty,conclty',sigma,p',c') = mk_casegoals sigma goal goalacc p c in
        let (acc'',sigma,rbranches) =
          Array.fold_left2
            (fun (lacc,sigma,bacc) ty fi ->
               let (r,_,s,b') = mk_refgoals sigma goal lacc ty fi in r,s,(b'::bacc))
            (acc',sigma,[]) lbrty lf
        in
        let lf' = Array.rev_of_list rbranches in
        let ans =
          if p' == p && c' == c && Array.equal (==) lf' lf then trm
          else Term.mkCase (ci,p',c',lf')
        in
        (acc'',conclty',sigma, ans)

    | Proj (p,c) ->
        let (acc',cty,sigma,c') = mk_hdgoals sigma goal goalacc c in
        let c = mkProj (p, c') in
        let ty = get_type_of env sigma c in
        (acc',ty,sigma,c)

    | _ ->
        if !check && occur_meta trm then
          anomaly (Pp.str "refine called with a dependent meta");
        goalacc, goal_type_of env sigma trm, sigma, trm

(* Fold [mk_refgoals] over the arguments of an application: each
   argument is checked against the corresponding product domain of
   [funty] (let-ins are expanded on the fly); a non-product head type
   raises [CannotApply]. *)
and mk_arggoals sigma goal goalacc funty allargs =
  let foldmap (goalacc, funty, sigma) harg =
    let t = whd_betadeltaiota (Goal.V82.env sigma goal) sigma funty in
    let rec collapse t = match kind_of_term t with
    | LetIn (_, c1, _, b) -> collapse (subst1 c1 b)
    | _ -> t
    in
    let t = collapse t in
    match kind_of_term t with
    | Prod (_, c1, b) ->
      let (acc, hargty, sigma, arg) = mk_refgoals sigma goal goalacc c1 harg in
      (acc, subst1 harg b, sigma), arg
    | _ -> raise (RefinerError (CannotApply (t, harg)))
  in
  Array.smartfoldmap foldmap (goalacc, funty, sigma) allargs

(* Subgoals for the scrutinee and return predicate of a case construct,
   plus the expected branch types and the overall conclusion type. *)
and mk_casegoals sigma goal goalacc p c =
  let env = Goal.V82.env sigma goal in
  let (acc',ct,sigma,c') = mk_hdgoals sigma goal goalacc c in
  let (acc'',pt,sigma,p') = mk_hdgoals sigma goal acc' p in
  let indspec =
    try Tacred.find_hnf_rectype env sigma ct
    with Not_found -> anomaly (Pp.str "mk_casegoals") in
  let (lbrty,conclty) = type_case_branches_with_names env indspec p c in
  (acc'',lbrty,conclty,sigma,p',c')
(* [convert_hyp check sign sigma (id,b,bt)] replaces the declaration of
   [id] in [sign] by [(id,b,bt)].  When [check] is set:
   - the new type [bt] must be convertible to the stored one,
   - the new optional body [b] must be convertible to the stored one,
   - the declarations [d] depends on are recomputed and the resulting
     context is reordered so the dependencies come first. *)
let convert_hyp check sign sigma (id,b,bt as d) =
  let env = Global.env() in
  let reorder = ref [] in
  let sign' =
    apply_to_hyp sign id
      (fun _ (_,c,ct) _ ->
        let env = Global.env_of_context sign in
        (* conversion checks compare against the previously stored type/body *)
        if check && not (is_conv env sigma bt ct) then
          error ("Incorrect change of the type of "^(Id.to_string id)^".");
        if check && not (Option.equal (is_conv env sigma) b c) then
          error ("Incorrect change of the body of "^(Id.to_string id)^".");
        if check then reorder := check_decl_position env sign d;
        d) in
  reorder_val_context env sign' !reorder
let prim_refiner r sigma goal =
let env = Goal.V82.env sigma goal in
let sign = Goal.V82.hyps sigma goal in
let cl = Goal.V82.concl sigma goal in
let mk_goal hyps concl =
Goal.V82.mk_goal sigma hyps concl (Goal.V82.extra sigma goal)
in
match r with
| Cut (b,replace,id,t) ->
let (sg1,ev1,sigma) = mk_goal sign (nf_betaiota sigma t) in
let sign,t,cl,sigma =
if replace then
let nexthyp = get_hyp_after id (named_context_of_val sign) in
let sign,t,cl,sigma = clear_hyps2 env sigma (Id.Set.singleton id) sign t cl in
move_hyp false ([],(id,None,t),named_context_of_val sign)
nexthyp,
t,cl,sigma
else
(if !check && mem_named_context id (named_context_of_val sign) then
error ("Variable " ^ Id.to_string id ^ " is already declared.");
push_named_context_val (id,None,t) sign,t,cl,sigma) in
let (sg2,ev2,sigma) =
Goal.V82.mk_goal sigma sign cl (Goal.V82.extra sigma goal) in
let oterm = Term.mkApp (mkNamedLambda id t ev2 , [| ev1 |]) in
let sigma = Goal.V82.partial_solution_to sigma goal sg2 oterm in
if b then ([sg1;sg2],sigma) else ([sg2;sg1],sigma)
| FixRule (f,n,rest,j) ->
let rec check_ind env k cl =
match kind_of_term (strip_outer_cast cl) with
| Prod (na,c1,b) ->
if Int.equal k 1 then
try
fst (find_inductive env sigma c1)
with Not_found ->
error "Cannot do a fixpoint on a non inductive type."
else
check_ind (push_rel (na,None,c1) env) (k-1) b
| _ -> error "Not enough products."
in
let ((sp,_),u) = check_ind env n cl in
let firsts,lasts = List.chop j rest in
let all = firsts@(f,n,cl)::lasts in
let rec mk_sign sign = function
| (f,n,ar)::oth ->
let ((sp',_),u') = check_ind env n ar in
if not (eq_mind sp sp') then
error ("Fixpoints should be on the same " ^
"mutual inductive declaration.");
if !check && mem_named_context f (named_context_of_val sign) then
error
("Name "^Id.to_string f^" already used in the environment");
mk_sign (push_named_context_val (f,None,ar) sign) oth
| [] ->
Evd.Monad.List.map (fun (_,_,c) sigma ->
let gl,ev,sig' =
Goal.V82.mk_goal sigma sign c (Goal.V82.extra sigma goal) in
(gl,ev),sig')
all sigma
in
let (gls_evs,sigma) = mk_sign sign all in
let (gls,evs) = List.split gls_evs in
let ids = List.map pi1 all in
let evs = List.map (Vars.subst_vars (List.rev ids)) evs in
let indxs = Array.of_list (List.map (fun n -> n-1) (List.map pi2 all)) in
let funnames = Array.of_list (List.map (fun i -> Name i) ids) in
let typarray = Array.of_list (List.map pi3 all) in
let bodies = Array.of_list evs in
let oterm = Term.mkFix ((indxs,0),(funnames,typarray,bodies)) in
let sigma = Goal.V82.partial_solution sigma goal oterm in
(gls,sigma)
| Cofix (f,others,j) ->
let rec check_is_coind env cl =
let b = whd_betadeltaiota env sigma cl in
match kind_of_term b with
| Prod (na,c1,b) -> check_is_coind (push_rel (na,None,c1) env) b
| _ ->
try
let _ = find_coinductive env sigma b in ()
with Not_found ->
error ("All methods must construct elements " ^
"in coinductive types.")
in
let firsts,lasts = List.chop j others in
let all = firsts@(f,cl)::lasts in
List.iter (fun (_,c) -> check_is_coind env c) all;
let rec mk_sign sign = function
| (f,ar)::oth ->
(try
(let _ = lookup_named_val f sign in
error "Name already used in the environment.")
with
| Not_found ->
mk_sign (push_named_context_val (f,None,ar) sign) oth)
| [] ->
Evd.Monad.List.map (fun (_,c) sigma ->
let gl,ev,sigma =
Goal.V82.mk_goal sigma sign c (Goal.V82.extra sigma goal) in
(gl,ev),sigma)
all sigma
in
let (gls_evs,sigma) = mk_sign sign all in
let (gls,evs) = List.split gls_evs in
let (ids,types) = List.split all in
let evs = List.map (Vars.subst_vars (List.rev ids)) evs in
let funnames = Array.of_list (List.map (fun i -> Name i) ids) in
let typarray = Array.of_list types in
let bodies = Array.of_list evs in
let oterm = Term.mkCoFix (0,(funnames,typarray,bodies)) in
let sigma = Goal.V82.partial_solution sigma goal oterm in
(gls,sigma)
| Refine c ->
check_meta_variables c;
let (sgl,cl',sigma,oterm) = mk_refgoals sigma goal [] cl c in
let sgl = List.rev sgl in
let sigma = Goal.V82.partial_solution sigma goal oterm in
(sgl, sigma)
| Thin ids ->
let ids = List.fold_left (fun accu x -> Id.Set.add x accu) Id.Set.empty ids in
let (hyps,concl,nsigma) = clear_hyps env sigma ids sign cl in
let (gl,ev,sigma) =
Goal.V82.mk_goal nsigma hyps concl (Goal.V82.extra nsigma goal)
in
let sigma = Goal.V82.partial_solution_to sigma goal gl ev in
([gl], sigma)
| Move (hfrom, hto) ->
let (left,right,declfrom,toleft) =
split_sign hfrom hto (named_context_of_val sign) in
let hyps' =
move_hyp toleft (left,declfrom,right) hto in
let (gl,ev,sigma) = mk_goal hyps' cl in
let sigma = Goal.V82.partial_solution_to sigma goal gl ev in
([gl], sigma)
|
2c9c3621b77bf1c4ae2b689ec5378394967b04aad7cd9f410055e38977adbe86 | imdea-software/leap | Global.mli |
(***********************************************************************)
(* *)
LEAP
(* *)
, IMDEA Software Institute
(* *)
(* *)
Copyright 2011 IMDEA Software Institute
(* *)
Licensed under the Apache License , Version 2.0 ( the " License " ) ;
you may not use this file except in compliance with the License .
(* You may obtain a copy of the License at *)
(* *)
(* -2.0 *)
(* *)
(* Unless required by applicable law or agreed to in writing, *)
software distributed under the License is distributed on an
" AS IS " BASIS , WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND ,
(* either express or implied. *)
(* See the License for the specific language governing permissions *)
(* and limitations under the License. *)
(* *)
(***********************************************************************)
exception ParserError of string
val last : string -> unit
val get_last : unit -> string
val reset_linenum : unit -> unit
val incr_linenum : unit -> unit
val get_linenum : unit -> int
val pln : unit -> unit
| null | https://raw.githubusercontent.com/imdea-software/leap/5f946163c0f80ff9162db605a75b7ce2e27926ef/src/misc/Global.mli | ocaml | *********************************************************************
You may obtain a copy of the License at
-2.0
Unless required by applicable law or agreed to in writing,
either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
********************************************************************* |
LEAP
, IMDEA Software Institute
Copyright 2011 IMDEA Software Institute
Licensed under the Apache License , Version 2.0 ( the " License " ) ;
you may not use this file except in compliance with the License .
software distributed under the License is distributed on an
" AS IS " BASIS , WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND ,
(* Raised on a parse failure; carries a human-readable message. *)
exception ParserError of string

(* [last s] records [s] as the most recently seen input fragment
   (presumably for error reporting -- confirm against Global.ml). *)
val last : string -> unit

(* Return the string most recently stored with [last]. *)
val get_last : unit -> string

(* Reset the tracked line number to its initial value. *)
val reset_linenum : unit -> unit

(* Advance the tracked line number by one (call on each newline). *)
val incr_linenum : unit -> unit

(* Current tracked line number. *)
val get_linenum : unit -> int

(* NOTE(review): name suggests it prints/logs the current line number --
   verify against the implementation. *)
val pln : unit -> unit
|
37690aef298510a76c6a3bbbfe4f66ef2a850218a0d1e7881b042f4eb71ab76e | danieljharvey/mimsa | Interpret.hs | module Language.Mimsa.Interpreter.Interpret (interpret, addEmptyStackFrames) where
import Control.Monad.Reader
import Data.Functor
import Data.HashMap.Strict (HashMap)
import Data.Hashable
import Language.Mimsa.Core
import Language.Mimsa.Interpreter.App
import Language.Mimsa.Interpreter.If
import Language.Mimsa.Interpreter.Infix
import Language.Mimsa.Interpreter.Let
import Language.Mimsa.Interpreter.Monad
import Language.Mimsa.Interpreter.PatternMatch
import Language.Mimsa.Interpreter.RecordAccess
import Language.Mimsa.Interpreter.Types
import Language.Mimsa.Types.Error.InterpreterError
import Language.Mimsa.Types.Interpreter.Stack
import Language.Mimsa.Types.Store.ExprHash
import Language.Mimsa.Types.Typechecker.Unique
initialStack :: (Ord var, Hashable var) => StackFrame var ann
initialStack = StackFrame mempty mempty
addEmptyStackFrames ::
(Hashable var, Monoid ann) =>
Expr (var, Unique) ann ->
Expr (var, Unique) (ExprData var ann)
addEmptyStackFrames expr =
expr $> mempty
interpret ::
(Eq ann, Ord var, Hashable var, Show var, Printer var, Monoid ann, Show ann) =>
HashMap ExprHash (InterpretExpr var ann) ->
HashMap InfixOp ExprHash ->
InterpretExpr var ann ->
Either (InterpreterError var ann) (InterpretExpr var ann)
interpret deps infixes expr =
runReaderT (interpretExpr expr) (InterpretReaderEnv initialStack deps infixes)
-- somewhat pointless separate function to make debug logging each value out
-- easier
interpretExpr ::
(Eq ann, Ord var, Hashable var, Show var, Printer var, Monoid ann, Show ann) =>
InterpretExpr var ann ->
InterpreterM var ann (InterpretExpr var ann)
interpretExpr =
interpretExpr'
interpretExpr' ::
(Eq ann, Ord var, Hashable var, Show var, Printer var, Monoid ann, Show ann) =>
InterpretExpr var ann ->
InterpreterM var ann (InterpretExpr var ann)
interpretExpr' (MyLiteral _ val) = pure (MyLiteral mempty val)
interpretExpr' (MyAnnotation _ _ expr) = interpretExpr' expr
interpretExpr' (MyLet _ ident expr body) =
interpretLet interpretExpr ident expr body
interpretExpr' (MyVar _ _ var) =
lookupVar var >>= interpretExpr
interpretExpr' (MyLambda (ExprData current isRec ann) ident body) = do
-- capture current environment
stackFrame <-
getCurrentStackFrame
-- add it to already captured vars
let newExprData =
ExprData
(current <> stackFrame)
isRec
ann
-- return it
pure
(MyLambda newExprData ident body)
interpretExpr' (MyTuple ann a as) =
MyTuple ann <$> interpretExpr a <*> traverse interpretExpr as
interpretExpr' (MyInfix _ op a b) =
interpretInfix interpretExpr op a b
interpretExpr' (MyIf ann predExpr thenExpr elseExpr) =
interpretIf interpretExpr ann predExpr thenExpr elseExpr
interpretExpr' (MyApp ann fn a) =
interpretApp interpretExpr ann fn a
interpretExpr' (MyRecordAccess ann expr name) =
interpretRecordAccess interpretExpr ann expr name
interpretExpr' (MyTupleAccess ann expr index) =
interpretTupleAccess interpretExpr ann expr index
interpretExpr' (MyPatternMatch _ matchExpr patterns) = do
interpretPatternMatch interpretExpr matchExpr patterns
interpretExpr' (MyLetPattern _ pat patExpr body) =
interpretLetPattern interpretExpr pat patExpr body
interpretExpr' (MyRecord ann as) =
MyRecord ann <$> traverse interpretExpr as
interpretExpr' (MyArray ann as) =
MyArray ann <$> traverse interpretExpr as
interpretExpr' (MyConstructor as modName const') =
pure (MyConstructor as modName const')
interpretExpr' (MyTypedHole ann name) =
pure (MyTypedHole ann name)
| null | https://raw.githubusercontent.com/danieljharvey/mimsa/d6a5d1933b82268458b1489c1d087e96b0d8e8fc/compiler/src/Language/Mimsa/Interpreter/Interpret.hs | haskell | somewhat pointless separate function to make debug logging each value out
easier
capture current environment
add it to already captured vars
return it | module Language.Mimsa.Interpreter.Interpret (interpret, addEmptyStackFrames) where
import Control.Monad.Reader
import Data.Functor
import Data.HashMap.Strict (HashMap)
import Data.Hashable
import Language.Mimsa.Core
import Language.Mimsa.Interpreter.App
import Language.Mimsa.Interpreter.If
import Language.Mimsa.Interpreter.Infix
import Language.Mimsa.Interpreter.Let
import Language.Mimsa.Interpreter.Monad
import Language.Mimsa.Interpreter.PatternMatch
import Language.Mimsa.Interpreter.RecordAccess
import Language.Mimsa.Interpreter.Types
import Language.Mimsa.Types.Error.InterpreterError
import Language.Mimsa.Types.Interpreter.Stack
import Language.Mimsa.Types.Store.ExprHash
import Language.Mimsa.Types.Typechecker.Unique
initialStack :: (Ord var, Hashable var) => StackFrame var ann
initialStack = StackFrame mempty mempty
addEmptyStackFrames ::
(Hashable var, Monoid ann) =>
Expr (var, Unique) ann ->
Expr (var, Unique) (ExprData var ann)
addEmptyStackFrames expr =
expr $> mempty
interpret ::
(Eq ann, Ord var, Hashable var, Show var, Printer var, Monoid ann, Show ann) =>
HashMap ExprHash (InterpretExpr var ann) ->
HashMap InfixOp ExprHash ->
InterpretExpr var ann ->
Either (InterpreterError var ann) (InterpretExpr var ann)
interpret deps infixes expr =
runReaderT (interpretExpr expr) (InterpretReaderEnv initialStack deps infixes)
interpretExpr ::
(Eq ann, Ord var, Hashable var, Show var, Printer var, Monoid ann, Show ann) =>
InterpretExpr var ann ->
InterpreterM var ann (InterpretExpr var ann)
interpretExpr =
interpretExpr'
interpretExpr' ::
(Eq ann, Ord var, Hashable var, Show var, Printer var, Monoid ann, Show ann) =>
InterpretExpr var ann ->
InterpreterM var ann (InterpretExpr var ann)
interpretExpr' (MyLiteral _ val) = pure (MyLiteral mempty val)
interpretExpr' (MyAnnotation _ _ expr) = interpretExpr' expr
interpretExpr' (MyLet _ ident expr body) =
interpretLet interpretExpr ident expr body
interpretExpr' (MyVar _ _ var) =
lookupVar var >>= interpretExpr
interpretExpr' (MyLambda (ExprData current isRec ann) ident body) = do
stackFrame <-
getCurrentStackFrame
let newExprData =
ExprData
(current <> stackFrame)
isRec
ann
pure
(MyLambda newExprData ident body)
interpretExpr' (MyTuple ann a as) =
MyTuple ann <$> interpretExpr a <*> traverse interpretExpr as
interpretExpr' (MyInfix _ op a b) =
interpretInfix interpretExpr op a b
interpretExpr' (MyIf ann predExpr thenExpr elseExpr) =
interpretIf interpretExpr ann predExpr thenExpr elseExpr
interpretExpr' (MyApp ann fn a) =
interpretApp interpretExpr ann fn a
interpretExpr' (MyRecordAccess ann expr name) =
interpretRecordAccess interpretExpr ann expr name
interpretExpr' (MyTupleAccess ann expr index) =
interpretTupleAccess interpretExpr ann expr index
interpretExpr' (MyPatternMatch _ matchExpr patterns) = do
interpretPatternMatch interpretExpr matchExpr patterns
interpretExpr' (MyLetPattern _ pat patExpr body) =
interpretLetPattern interpretExpr pat patExpr body
interpretExpr' (MyRecord ann as) =
MyRecord ann <$> traverse interpretExpr as
interpretExpr' (MyArray ann as) =
MyArray ann <$> traverse interpretExpr as
interpretExpr' (MyConstructor as modName const') =
pure (MyConstructor as modName const')
interpretExpr' (MyTypedHole ann name) =
pure (MyTypedHole ann name)
|
7c61abcbcb85ae76edb4a98a5c3aa1ea47a8ac8aa798a2fc3727dfa45f4777d5 | ijvcms/chuanqi_dev | mod_ets_holder.erl | %%%-------------------------------------------------------------------
%%% @author qhb
%%% @doc
%%% ets缓存挂载进程
%%% @end
%%%-------------------------------------------------------------------
-module(mod_ets_holder).
-behaviour(gen_server).
-include("common.hrl").
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
%% ====================================================================
%% API functions
%% ====================================================================
-export([
start_link/1
]).
{ configs , ConfigList }
%% {modlib, ModLibList}
start_link(Mode) ->
gen_server:start_link(?MODULE, [Mode], []).
%% ====================================================================
%% Behavioural functions
%% ====================================================================
-record(state, {}).
%% init/1
%% ====================================================================
init([Mode]) ->
?INFO("mod_ets_holder init ~p", [Mode]),
do_init(Mode),
{ok, #state{}}.
%% handle_call/3
%% ====================================================================
handle_call(_Request, _From, State) ->
Reply = ok,
{reply, Reply, State}.
%% handle_cast/2
%% ====================================================================
handle_cast(_Msg, State) ->
{noreply, State}.
%% handle_info/2
%% ====================================================================
handle_info(_Info, State) ->
{noreply, State}.
%% terminate/2
%% ====================================================================
terminate(_Reason, _State) ->
ok.
%% code_change/3
%% ====================================================================
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% ====================================================================
Internal functions
%% ====================================================================
do_init({configs, ConfigList}) ->
lists:foreach(fun(Cfg) ->
{EtsName, Option} = Cfg,
ets:new(EtsName, Option)
end, ConfigList),
ok;
do_init({modlib, []}) ->
ok;
do_init({modlib, [{M} | Last]}) ->
erlang:apply(M, init_local, []),
do_init({modlib, Last});
do_init({modlib, [{M, A} | Last]}) ->
erlang:apply(M, init_local, A),
do_init({modlib, Last});
do_init({modlib, [{M, F, A} | Last]}) ->
erlang:apply(M, F, A),
do_init({modlib, Last});
do_init({modlib, [H | Last]}) ->
?WARNING("mod_ets_holder error ~p", [H]),
do_init({modlib, Last}).
| null | https://raw.githubusercontent.com/ijvcms/chuanqi_dev/7742184bded15f25be761c4f2d78834249d78097/server/trunk/server/src/system/ets/mod_ets_holder.erl | erlang | -------------------------------------------------------------------
@author qhb
@doc
ets缓存挂载进程
@end
-------------------------------------------------------------------
====================================================================
API functions
====================================================================
{modlib, ModLibList}
====================================================================
Behavioural functions
====================================================================
init/1
====================================================================
handle_call/3
====================================================================
handle_cast/2
====================================================================
handle_info/2
====================================================================
terminate/2
====================================================================
code_change/3
====================================================================
====================================================================
==================================================================== |
-module(mod_ets_holder).
-behaviour(gen_server).
-include("common.hrl").
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([
start_link/1
]).
{ configs , ConfigList }
start_link(Mode) ->
gen_server:start_link(?MODULE, [Mode], []).
-record(state, {}).
init([Mode]) ->
?INFO("mod_ets_holder init ~p", [Mode]),
do_init(Mode),
{ok, #state{}}.
handle_call(_Request, _From, State) ->
Reply = ok,
{reply, Reply, State}.
handle_cast(_Msg, State) ->
{noreply, State}.
handle_info(_Info, State) ->
{noreply, State}.
terminate(_Reason, _State) ->
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
Internal functions
do_init({configs, ConfigList}) ->
lists:foreach(fun(Cfg) ->
{EtsName, Option} = Cfg,
ets:new(EtsName, Option)
end, ConfigList),
ok;
do_init({modlib, []}) ->
ok;
do_init({modlib, [{M} | Last]}) ->
erlang:apply(M, init_local, []),
do_init({modlib, Last});
do_init({modlib, [{M, A} | Last]}) ->
erlang:apply(M, init_local, A),
do_init({modlib, Last});
do_init({modlib, [{M, F, A} | Last]}) ->
erlang:apply(M, F, A),
do_init({modlib, Last});
do_init({modlib, [H | Last]}) ->
?WARNING("mod_ets_holder error ~p", [H]),
do_init({modlib, Last}).
|
a87815306bb684c59c38e75d56ba7f5405d87e215c3a8e1efc00650639bf4b77 | returntocorp/semgrep | metavar_arg.ml | (* ERROR: *)
let foo = foo 1 2
(* ERROR: *)
let foo1 = foo a_very_long_constant_name 2
(* ERROR: *)
let foo2 = foo unsafe (* indeed *) 2
(* ERROR: *)
let foo3 = foo (bar 1 3) 2
let foo4 = foo 2 1
| null | https://raw.githubusercontent.com/returntocorp/semgrep/8c379c2c73b4ece1b1316655b00e21a8af3f00bb/tests/patterns/ocaml/metavar_arg.ml | ocaml | ERROR:
ERROR:
ERROR:
indeed
ERROR: | let foo = foo 1 2
let foo1 = foo a_very_long_constant_name 2
let foo3 = foo (bar 1 3) 2
let foo4 = foo 2 1
|
0cbc788b83f0edebf370fdd97052375a8df4a7672319a28c69a10f5586dce568 | Clozure/ccl-tests | probe-file.lsp | ;-*- Mode: Lisp -*-
Author :
Created : Mon Jan 5 20:46:29 2004
;;;; Contains: Tests of PROBE-FILE
(in-package :cl-test)
(deftest probe-file.1
(probe-file #p"nonexistent")
nil)
(deftest probe-file.2
(let ((s (open #p"probe-file.lsp" :direction :input)))
(prog1
(equalpt (truename #p"probe-file.lsp")
(probe-file s))
(close s)))
t)
(deftest probe-file.3
(let ((s (open #p"probe-file.lsp" :direction :input)))
(close s)
(equalpt (truename #p"probe-file.lsp")
(probe-file s)))
t)
(deftest probe-file.4
(equalpt (truename #p"probe-file.lsp")
(probe-file "CLTEST:probe-file.lsp"))
t)
Specialized string tests
(deftest probe-file.5
(do-special-strings
(str "probe-file.lsp" nil)
(let ((s (open str :direction :input)))
(assert (equalpt (truename #p"probe-file.lsp") (probe-file s)))
(close s)))
nil)
;;; Error tests
(deftest probe-file.error.1
(signals-error (probe-file) program-error)
t)
(deftest probe-file.error.2
(signals-error (probe-file #p"probe-file.lsp" nil) program-error)
t)
(deftest probe-file.error.3
(signals-error-always (probe-file (make-pathname :name :wild)) file-error)
t t)
(deftest probe-file.error.4
(signals-error-always (probe-file "CLTEST:*.FOO") file-error)
t t)
| null | https://raw.githubusercontent.com/Clozure/ccl-tests/0478abddb34dbc16487a1975560d8d073a988060/ansi-tests/probe-file.lsp | lisp | -*- Mode: Lisp -*-
Contains: Tests of PROBE-FILE
Error tests | Author :
Created : Mon Jan 5 20:46:29 2004
(in-package :cl-test)
(deftest probe-file.1
(probe-file #p"nonexistent")
nil)
(deftest probe-file.2
(let ((s (open #p"probe-file.lsp" :direction :input)))
(prog1
(equalpt (truename #p"probe-file.lsp")
(probe-file s))
(close s)))
t)
(deftest probe-file.3
(let ((s (open #p"probe-file.lsp" :direction :input)))
(close s)
(equalpt (truename #p"probe-file.lsp")
(probe-file s)))
t)
(deftest probe-file.4
(equalpt (truename #p"probe-file.lsp")
(probe-file "CLTEST:probe-file.lsp"))
t)
Specialized string tests
(deftest probe-file.5
(do-special-strings
(str "probe-file.lsp" nil)
(let ((s (open str :direction :input)))
(assert (equalpt (truename #p"probe-file.lsp") (probe-file s)))
(close s)))
nil)
(deftest probe-file.error.1
(signals-error (probe-file) program-error)
t)
(deftest probe-file.error.2
(signals-error (probe-file #p"probe-file.lsp" nil) program-error)
t)
(deftest probe-file.error.3
(signals-error-always (probe-file (make-pathname :name :wild)) file-error)
t t)
(deftest probe-file.error.4
(signals-error-always (probe-file "CLTEST:*.FOO") file-error)
t t)
|
bc07668d3c695d6d3305125024d160d62224eb1b3852d36bc837e236a3a91940 | mbutterick/beautiful-racket | sample-math.rkt | #lang basic-demo-2
10 rem all results should be 1
20 print 1 - 2 * 3 + 4 * 5 - 6 = 9
30 print (1 - 2) * (3 + 4) * (5 - 6) = 7
40 print 1 / 4 = .25
50 print 2 ^ 3 = 8
60 print 9 ^ 0.5 = 3
70 print 6 mod 2 = 0
80 print 5 mod 2 = 1
| null | https://raw.githubusercontent.com/mbutterick/beautiful-racket/f0e2cb5b325733b3f9cbd554cc7d2bb236af9ee9/beautiful-racket-demo/basic-demo-2/sample-math.rkt | racket | #lang basic-demo-2
10 rem all results should be 1
20 print 1 - 2 * 3 + 4 * 5 - 6 = 9
30 print (1 - 2) * (3 + 4) * (5 - 6) = 7
40 print 1 / 4 = .25
50 print 2 ^ 3 = 8
60 print 9 ^ 0.5 = 3
70 print 6 mod 2 = 0
80 print 5 mod 2 = 1
| |
78f272fec3b9ed1ba338d4470f9e53e5824353ca0d4aa2a3c440150c271569a4 | tov/dssl2 | default-timeout-slow.rkt | #lang dssl2
assert time < 1
assert sleep(2) is None, time < 10
assert time < 4
assert sleep(2) is None
| null | https://raw.githubusercontent.com/tov/dssl2/105d18069465781bd9b87466f8336d5ce9e9a0f3/test/dssl2/default-timeout-slow.rkt | racket | #lang dssl2
assert time < 1
assert sleep(2) is None, time < 10
assert time < 4
assert sleep(2) is None
| |
440bcdbe0684f12744b72bea99fec9cfde85d964c3e4c49d2f3429dc80cf83bc | cram2/cram | gamma.lisp | Regression test GAMMA for GSLL , automatically generated
;;
Copyright 2009 , 2010
Distributed under the terms of the GNU General Public License
;;
;; This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
You should have received a copy of the GNU General Public License
;; along with this program. If not, see </>.
(in-package :gsl)
(LISP-UNIT:DEFINE-TEST GAMMA
;; Semi-automatically converted from specfunc/test_gamma.c
(ASSERT-TO-TOLERANCE (LOG-GAMMA -0.1d0) 2.368961332728788655d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (- (/ 1.0d0 256.0d0))) 5.547444766967471595d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA 1.0d-08) 18.420680738180208905d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA 0.1d0) 2.252712651734205d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ 1.0d0 (/ 1.0d0 256.0d0))) -0.0022422226599611501448d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ 2.0d0 (/ 1.0d0 256.0d0))) 0.0016564177556961728692d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA 100.0d0) 359.1342053695753d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ (- 1.0d0) (- (/ 1.0d0 65536.0d0)))) 11.090348438090047844d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ (- 1.0d0) (- (/ 1.0d0 2.68435456d8)))) 19.408121054103474300d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA -100.5d0) -364.9009683094273518d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ (- 100) (- (/ 1.0d0 65536.0d0)))) -352.6490910117097874d0 +TEST-TOL0+)
(assert-to-tolerance (log-gamma-sign 0.7d0)
'(0.26086724653166651439d0 1.0d0) +test-tol1+)
(assert-to-tolerance (log-gamma-sign 0.1d0)
'(2.2527126517342059599d0 1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign -0.1d0)
'(2.368961332728788655d0 -1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -1.0d0 (/ -65536.0d0)))
'(11.090348438090047844d0 1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -2.0d0 (/ -256.0d0)))
'(4.848447725860607213d0 -1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -2.0d0 (/ -65536.0d0)))
'(10.397193628164674967d0 -1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -3.0d0 (/ -8.0d0)))
'(0.15431112768404182427d0 1.0d0) +test-tol2+)
(assert-to-tolerance (log-gamma-sign -100.5d0)
'(-364.9009683094273518d0 -1.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ 1.0d0 (/ 1.0d0 4096.0d0))) 0.9998591371459403421d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ 1.0d0 (/ 1.0d0 32.0d0))) 0.9829010992836269148d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ 2.0d0 (/ 1.0d0 256.0d0))) 1.0016577903733583299d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA 9.0d0) 40320.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA 10.0d0) 362880.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA 100.0d0) 9.332621544394415268d+155 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (GAMMA 170.0d0) 4.269068009004705275d+304 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (GAMMA 171.0d0) 7.257415615307998967d+306 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (GAMMA -10.5d0) -2.640121820547716316d-07 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA -11.25d0) 6.027393816261931672d-08 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ (- 1.0d0) (/ 1.0d0 65536.0d0))) -65536.42280587818970d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d-08) 3989.423555759890865d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d-05) 126.17168469882690233d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 0.001d0) 12.708492464364073506d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.5d0) 1.0563442442685598666d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 3.0d0) 1.0280645179187893045d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 9.0d0) 1.0092984264218189715d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 11.0d0) 1.0076024283104962850d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 100.0d0) 1.0008336778720121418d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d+05) 1.0000008333336805529d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d+20) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 1.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 2.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 3.0d0) 0.5d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 4.0d0) #.(/ 1.0d0 6.0d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 10.0d0) #.(/ 1.0d0 362880.0d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 100.0d0) 1.0715102881254669232d-156 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (1/GAMMA 0.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -1.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -2.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -3.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -4.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -10.5d0) #.(/ -2.640121820547716316d-07) +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (1/GAMMA -11.25d0) #.(/ 6.027393816261931672d-08) +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (1/GAMMA (+ (- 1.0d0) (/ 1.0d0 65536.0d0)))
#.(/ -65536.42280587818970d0) +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(5.0d0 2.0d0))
'(2.7487017561338026749d0 3.0738434100497007915d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(100.0d0 100.0d0))
'(315.07804459949331323d0 2.0821801804113110099d0)
;; we can't set tolerances separately, so set the widest
+TEST-TOL3+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(100.0d0 -1000.0d0))
'(-882.3920483010362817000d0 -2.1169293725678813270d0)
;; we can't set tolerances separately, so set the widest
+TEST-TOL3+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(-100.0d0 -1.0d0))
'(-365.0362469529239516000d0 -3.0393820262864361140d0)
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 (/ 1048576.0d0)) 1.7148961854776073928d-67
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 (/ 1024.0d0)) 2.1738891788497900281d-37 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 1.0d0) 2.7557319223985890653d-07 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 5.0d0) 2.6911444554673721340d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 500.0d0) 2.6911444554673721340d+20 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 100 100.0d0) 1.0715102881254669232d+42 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 1000 200.0d0) 2.6628790558154746898d-267 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 1000 500.0d0) 2.3193170139740855074d+131 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (FACTORIAL 0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (FACTORIAL 1) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (FACTORIAL 7) 5040.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (FACTORIAL 33) 8.683317618811886496d+36 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 1) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 7) 105.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 33) 6.332659870762850625d+18 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 1) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 7) 8.525161361065414300d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 33) 85.05446701758151741d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 7) 4.653960350157523371d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 33) 43.292252022541719660d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 34) 45.288575519655959140d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 1034) 3075.6383796271197707d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 1035) 3078.8839081731809169d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-CHOOSE 7 3) 3.555348061489413680d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-CHOOSE 5 2) 2.302585092994045684d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 7 3) 35.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 7 4) 35.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 5 2) 10.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 5 3) 10.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 495) 255244687600.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 5) 255244687600.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 200) 5.054949849935532221d+144 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 300) 5.054949849935532221d+144 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 0.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 (/ 65536.0d0)) 0.000022981557571259389129d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 (/ 256.0d0)) 0.005884960217985189004d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 7.0d0 3.0d0) 6.222576268071368616d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 2.0d0) 3.401197381662155375d0 +TEST-TOL0+)
(assert-to-tolerance (log-pochammer-sign 5.0d0 0.0d0) '(0.0d0 1.0d0) +test-tol1+)
(assert-to-tolerance (log-pochammer-sign -4.5d0 0.25d0)
'(0.7430116475119920117d0 1.0d0) +test-tol1+)
(assert-to-tolerance (log-pochammer-sign -4.5d0 1.25d0)
'(2.1899306304483174731d0 -1.0d0) +test-tol1+)
(ASSERT-TO-TOLERANCE (POCHAMMER 5.0d0 0.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER 7.0d0 3.0d0) 504.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER 5.0d0 2.0d0) 30.0d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (POCHAMMER 5.0d0 (/ 256.0d0)) 1.0059023106151364982d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 5.0d0 0.0d0) 1.506117668431800472d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 7.0d0 3.0d0) #.(/ 503.0d0 3.0d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 5.0d0 2.0d0) #.(/ 29.0d0 2.0d0) +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 5.0d0 0.01d0) 1.5186393661368275330d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 0.01d0) 1.8584945633829063516d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 (/ -8.0d0)) 1.0883319303552135488d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 (/ -256.0d0)) 1.7678268037726177453d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 -11.0d0) 0.09090909090939652475d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 -4.0d0) #.(/ 17160.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 -3.0d0) #.(/ -1320.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 -3.5d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 4.0d0) 3024.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 3.0d0) -504.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 3.5d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 0.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 -4.0d0) #.(/ 11880.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 -3.0d0) #.(/ -990.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 +4.0d0) 1680.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 +3.0d0) -336.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -3.0d0 +4.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -3.0d0 +3.0d0) -6.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (POCHAMMER -4.0d0 +4.0d0) 24.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (POCHAMMER -3.0d0 +100.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d-100 0.001d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 0.001d0 0.001d0) 0.9936876467088602902d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 0.001d0 1.0d0) 0.9997803916424144436d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 0.001d0 10.0d0) 0.9999999958306921828d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d0 0.001d0) 0.0009995001666250083319d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d0 1.01d0) 0.6357810204284766802d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d0 10.0d0) 0.9999546000702375151d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 10.0d0 10.01d0) 0.5433207586693410570d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 10.0d0 20.0d0) 0.9950045876916924128d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1000.0d0 1000.1d0) 0.5054666401440661753d0
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1000.0d0 2000.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 34.0d0 32.0d0)
0.3849626436463866776322932129d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 37.0d0 3.499999999999999289d+01)
0.3898035054195570860969333039d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 10.0d0 1.0d-16) 2.755731922398588814734648067d-167
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1263131.0d0 1261282.3637d0)
0.04994777516935182963821362168d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1263131.0d0 1263131.0d0)
0.500118321758657770672882362502514254d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.0d0 0.001d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 0.001d0) 0.006312353291139709793d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 1.0d0) 0.00021960835758555639171d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 2.0d0) 0.00004897691783098147880d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 5.0d0) 1.1509813397308608541d-06 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d0 0.001d0) 0.9990004998333749917d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d0 1.01d0) 0.3642189795715233198d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d0 10.0d0) 0.00004539992976248485154d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 10.0d0 10.01d0) 0.4566792413306589430d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 10.0d0 100.0d0) 1.1253473960842733885d-31 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1000.0d0 1000.1d0) 0.4945333598559338247d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1000.0d0 2000.0d0) 6.847349459614753180d-136 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 100.0d0 99.0d0) 0.5266956696005394d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 200.0d0 199.0d0) 0.5188414119121281d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 100.0d0 99.0d0) 0.4733043303994607d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 200.0d0 199.0d0) 0.4811585880878718d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 5670.0d0 4574.0d0) 3.063972328743934d-55 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 5670.0d0 4574.0d0) 1.0000000000000000d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA (+ 1.0d+06 -1.0d0) (+ 1.0d+06 -2.0d0))
0.50026596175224547004d0 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA (+ 1.0d+06 2.0d0) (+ 1.0d+06 1.0d0))
0.50026596135330304336d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d+06 (+ 1.0d+06 -2.0d0))
0.50066490399940144811d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d+07 (+ 1.0d+07 -2.0d0))
0.50021026104978614908d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA (/ -1048576.0d0) (/ 1048576.0d0))
13.285819596290624271d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 (/ 1048576.0d0))
13.381275128625328858d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 (/ 1048576.0d0))
1.0485617142715768655d+06 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.00001d0 0.001d0) 6.3317681434563592142d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.0001d0 0.001d0) 6.3338276439767189385d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 0.001d0) 6.3544709102510843793d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 0.001d0) 59.763880515942196981d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 0.001d0) 992.66896046923884234d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -3.5d0 0.001d0) 9.0224404490639003706d+09
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.5d0 0.001d0) 3.0083661558184815656d+30
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 0.1d0) 1.8249109609418620068d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 0.1d0) 3.4017693366916154163d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.0d0 0.1d0) 8.9490757483586989181d+08
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.5d0 0.1d0) 2.6967403834226421766d+09
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 1.0d0) 0.21928612679072766340d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 1.0d0) 0.17814771178156069019d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 1.0d0) 0.14849550677592204792d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -2.5d0 1.0d0) 0.096556648631275160264d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 10.0d0) 3.8302404656316087616d-07
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 10.0d0) 4.1470562324807320961d-06
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 10.0d0) 1.2609042613241570681d-06
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 10.0d0) 3.8302404656316087616d-07
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.5d0 10.0d0) 6.8404927328441566785d-17
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -100.0d0 10.0d0) 4.1238327669858313997d-107
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -200.0d0 10.0d0) 2.1614091830529343423d-207
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.0d0 0.001d0) 6.3315393641361493320d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.001d0 0.001d0) 6.3087159394864007261d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 1.0d0 0.001d0) 0.99900049983337499167d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 10.0d0 0.001d0) 362880.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.0d0 1.0d0) 0.21938393439552027368d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.001d0 1.0d0) 0.21948181320730279613d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 1.0d0 1.0d0) 0.36787944117144232160d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 10.0d0 1.0d0) 362879.95956592242045d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 100.0d0 1.0d0) 9.3326215443944152682d+155
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.0d0 100.0d0) 3.6835977616820321802d-46
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.001d0 100.0d0) 3.7006367674063550631d-46
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 1.0d0 100.0d0) 3.7200759760208359630d-44
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 10.0d0 100.0d0) 4.0836606309106112723d-26
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 100.0d0 100.0d0) 4.5421981208626694294d+155
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 1.0d-8) 19.113827924512310617d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 0.01d0) 18.420681743788563403d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 1.0d0) 18.420680743952365472d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 10.0d0) 18.420680715662683009d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 1000.0d0) 18.420680669107656949d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 0.1d0) 2.9813614810376273949d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 1.0d0) 2.3025850929940456840d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 100.0d0) 1.7926462324527931217d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 1000.0d0) 1.5619821298353164928d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d0 1.00025d0) -0.0002499687552073570d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d0 1.01d0) -0.009950330853168082848d0 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d0 1000.0d0) -6.907755278982137052d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 100.0d0 100.0d0) -139.66525908670663927d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (LOG-BETA 100.0d0 1000.0d0) -336.4348576477366051d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 100.0d0 1.0d+8) -1482.9339185256447309d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 1.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 1.001d0) 0.9990009990009990010d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 5.0d0) 0.2d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 100.0d0) 0.01d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (BETA 10.0d0 100.0d0) 2.3455339739604649879d-15 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 2.5d0 -0.1d0) -11.43621278354402041480d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 2.5d0 -1.1d0) 14.555179906328753255202d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -0.25d0 -0.1d0) -13.238937960945229110d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -1.25d0 -0.1d0) -14.298052997820847439d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 -99.1d0) -1.005181917797644630375787297d60 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 99.3d0) 0.0004474258199579694011200969001d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 100.1d0 -99.3d0) 1.328660939628876472028853747d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 1.2d0) 0.00365530364287960795444856281d0 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (BETA 100.1d0 -1.2d0) 1203.895236907821059270698160d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 -1.2d0) -3236.073671884748847700283841d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.001d0 0.0099d0) -853.946649365611147996495177d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (BETA 1.0d-32 1.5d0) 1.0d32 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 1.0d-6 0.5d0) 1000001.386293677092419390336d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -1.5d0 0.5d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 0.1d0 0.1d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 0.5d0) 0.5d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 0.1d0 1.0d0 0.5d0) 0.9330329915368074160d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 10.0d0 1.0d0 0.5d0) 0.0009765625000000000000d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 50.0d0 1.0d0 0.5d0) 8.881784197001252323d-16 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 0.1d0 0.5d0) 0.06696700846319258402d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 10.0d0 0.5d0) 0.99902343750000000000d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 50.0d0 0.5d0) 0.99999999999999911180d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 0.1d0) 0.10d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 2.0d0 0.1d0) 0.19d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 2.0d0 0.9d0) 0.99d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 50.0d0 60.0d0 0.5d0) 0.8309072939016694143d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 90.0d0 90.0d0 0.5d0) 0.5d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 500.0d0 500.0d0 0.6d0) 0.9999999999157549630d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 5000.0d0 5000.0d0 0.4d0) 4.518543727260666383d-91 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 5000.0d0 5000.0d0 0.6d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 5000.0d0 2000.0d0 0.6d0) 8.445388773903332659d-89 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.1d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 0.5d0) 0.675252001958389971991335d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 0.5d0) 0.324747998041610028008665d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.1d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 0.3d0) 0.7469186777964287252d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 0.3d0) 0.3995299653262016818d0 +TEST-TOL2+)
)
| null | https://raw.githubusercontent.com/cram2/cram/dcb73031ee944d04215bbff9e98b9e8c210ef6c5/cram_3rdparty/gsll/src/tests/gamma.lisp | lisp |
This program is free software: you can redistribute it and/or modify
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
along with this program. If not, see </>.
we can't set tolerances separately, so set the widest
we can't set tolerances separately, so set the widest | Regression test GAMMA for GSLL , automatically generated
Copyright 2009 , 2010
Distributed under the terms of the GNU General Public License
it under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
You should have received a copy of the GNU General Public License
(in-package :gsl)
(LISP-UNIT:DEFINE-TEST GAMMA
Semi - automatically converted from specfunc / test_gamma.c
(ASSERT-TO-TOLERANCE (LOG-GAMMA -0.1d0) 2.368961332728788655d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (- (/ 1.0d0 256.0d0))) 5.547444766967471595d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA 1.0d-08) 18.420680738180208905d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA 0.1d0) 2.252712651734205d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ 1.0d0 (/ 1.0d0 256.0d0))) -0.0022422226599611501448d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ 2.0d0 (/ 1.0d0 256.0d0))) 0.0016564177556961728692d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA 100.0d0) 359.1342053695753d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ (- 1.0d0) (- (/ 1.0d0 65536.0d0)))) 11.090348438090047844d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ (- 1.0d0) (- (/ 1.0d0 2.68435456d8)))) 19.408121054103474300d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA -100.5d0) -364.9009683094273518d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA (+ (- 100) (- (/ 1.0d0 65536.0d0)))) -352.6490910117097874d0 +TEST-TOL0+)
(assert-to-tolerance (log-gamma-sign 0.7d0)
'(0.26086724653166651439d0 1.0d0) +test-tol1+)
(assert-to-tolerance (log-gamma-sign 0.1d0)
'(2.2527126517342059599d0 1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign -0.1d0)
'(2.368961332728788655d0 -1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -1.0d0 (/ -65536.0d0)))
'(11.090348438090047844d0 1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -2.0d0 (/ -256.0d0)))
'(4.848447725860607213d0 -1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -2.0d0 (/ -65536.0d0)))
'(10.397193628164674967d0 -1.0d0) +test-tol0+)
(assert-to-tolerance (log-gamma-sign (+ -3.0d0 (/ -8.0d0)))
'(0.15431112768404182427d0 1.0d0) +test-tol2+)
(assert-to-tolerance (log-gamma-sign -100.5d0)
'(-364.9009683094273518d0 -1.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ 1.0d0 (/ 1.0d0 4096.0d0))) 0.9998591371459403421d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ 1.0d0 (/ 1.0d0 32.0d0))) 0.9829010992836269148d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ 2.0d0 (/ 1.0d0 256.0d0))) 1.0016577903733583299d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA 9.0d0) 40320.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA 10.0d0) 362880.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA 100.0d0) 9.332621544394415268d+155 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (GAMMA 170.0d0) 4.269068009004705275d+304 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (GAMMA 171.0d0) 7.257415615307998967d+306 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (GAMMA -10.5d0) -2.640121820547716316d-07 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA -11.25d0) 6.027393816261931672d-08 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA (+ (- 1.0d0) (/ 1.0d0 65536.0d0))) -65536.42280587818970d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d-08) 3989.423555759890865d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d-05) 126.17168469882690233d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 0.001d0) 12.708492464364073506d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.5d0) 1.0563442442685598666d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 3.0d0) 1.0280645179187893045d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 9.0d0) 1.0092984264218189715d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 11.0d0) 1.0076024283104962850d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 100.0d0) 1.0008336778720121418d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d+05) 1.0000008333336805529d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (GAMMA* 1.0d+20) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 1.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 2.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 3.0d0) 0.5d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 4.0d0) #.(/ 1.0d0 6.0d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 10.0d0) #.(/ 1.0d0 362880.0d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA 100.0d0) 1.0715102881254669232d-156 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (1/GAMMA 0.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -1.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -2.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -3.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -4.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (1/GAMMA -10.5d0) #.(/ -2.640121820547716316d-07) +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (1/GAMMA -11.25d0) #.(/ 6.027393816261931672d-08) +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (1/GAMMA (+ (- 1.0d0) (/ 1.0d0 65536.0d0)))
#.(/ -65536.42280587818970d0) +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(5.0d0 2.0d0))
'(2.7487017561338026749d0 3.0738434100497007915d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(100.0d0 100.0d0))
'(315.07804459949331323d0 2.0821801804113110099d0)
+TEST-TOL3+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(100.0d0 -1000.0d0))
'(-882.3920483010362817000d0 -2.1169293725678813270d0)
+TEST-TOL3+)
(ASSERT-TO-TOLERANCE (LOG-GAMMA-COMPLEX #C(-100.0d0 -1.0d0))
'(-365.0362469529239516000d0 -3.0393820262864361140d0)
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 (/ 1048576.0d0)) 1.7148961854776073928d-67
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 (/ 1024.0d0)) 2.1738891788497900281d-37 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 1.0d0) 2.7557319223985890653d-07 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 5.0d0) 2.6911444554673721340d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 10 500.0d0) 2.6911444554673721340d+20 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 100 100.0d0) 1.0715102881254669232d+42 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 1000 200.0d0) 2.6628790558154746898d-267 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (TAYLOR-COEFFICIENT 1000 500.0d0) 2.3193170139740855074d+131 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (FACTORIAL 0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (FACTORIAL 1) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (FACTORIAL 7) 5040.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (FACTORIAL 33) 8.683317618811886496d+36 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 1) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 7) 105.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (DOUBLE-FACTORIAL 33) 6.332659870762850625d+18 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 1) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 7) 8.525161361065414300d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-FACTORIAL 33) 85.05446701758151741d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 7) 4.653960350157523371d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 33) 43.292252022541719660d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 34) 45.288575519655959140d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 1034) 3075.6383796271197707d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-DOUBLE-FACTORIAL 1035) 3078.8839081731809169d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-CHOOSE 7 3) 3.555348061489413680d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-CHOOSE 5 2) 2.302585092994045684d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 7 3) 35.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 7 4) 35.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 5 2) 10.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 5 3) 10.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 495) 255244687600.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 5) 255244687600.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 200) 5.054949849935532221d+144 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (CHOOSE 500 300) 5.054949849935532221d+144 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 0.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 (/ 65536.0d0)) 0.000022981557571259389129d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 (/ 256.0d0)) 0.005884960217985189004d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 7.0d0 3.0d0) 6.222576268071368616d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-POCHAMMER 5.0d0 2.0d0) 3.401197381662155375d0 +TEST-TOL0+)
(assert-to-tolerance (log-pochammer-sign 5.0d0 0.0d0) '(0.0d0 1.0d0) +test-tol1+)
(assert-to-tolerance (log-pochammer-sign -4.5d0 0.25d0)
'(0.7430116475119920117d0 1.0d0) +test-tol1+)
(assert-to-tolerance (log-pochammer-sign -4.5d0 1.25d0)
'(2.1899306304483174731d0 -1.0d0) +test-tol1+)
(ASSERT-TO-TOLERANCE (POCHAMMER 5.0d0 0.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER 7.0d0 3.0d0) 504.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER 5.0d0 2.0d0) 30.0d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (POCHAMMER 5.0d0 (/ 256.0d0)) 1.0059023106151364982d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 5.0d0 0.0d0) 1.506117668431800472d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 7.0d0 3.0d0) #.(/ 503.0d0 3.0d0) +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 5.0d0 2.0d0) #.(/ 29.0d0 2.0d0) +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER 5.0d0 0.01d0) 1.5186393661368275330d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 0.01d0) 1.8584945633829063516d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 (/ -8.0d0)) 1.0883319303552135488d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 (/ -256.0d0)) 1.7678268037726177453d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (RELATIVE-POCHAMMER -5.5d0 -11.0d0) 0.09090909090939652475d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 -4.0d0) #.(/ 17160.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 -3.0d0) #.(/ -1320.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 -3.5d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 4.0d0) 3024.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 3.0d0) -504.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 3.5d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -9.0d0 0.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 -4.0d0) #.(/ 11880.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 -3.0d0) #.(/ -990.0d0) +test-tol0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 +4.0d0) 1680.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -8.0d0 +3.0d0) -336.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -3.0d0 +4.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (POCHAMMER -3.0d0 +3.0d0) -6.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (POCHAMMER -4.0d0 +4.0d0) 24.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (POCHAMMER -3.0d0 +100.0d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d-100 0.001d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 0.001d0 0.001d0) 0.9936876467088602902d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 0.001d0 1.0d0) 0.9997803916424144436d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 0.001d0 10.0d0) 0.9999999958306921828d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d0 0.001d0) 0.0009995001666250083319d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d0 1.01d0) 0.6357810204284766802d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1.0d0 10.0d0) 0.9999546000702375151d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 10.0d0 10.01d0) 0.5433207586693410570d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 10.0d0 20.0d0) 0.9950045876916924128d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1000.0d0 1000.1d0) 0.5054666401440661753d0
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1000.0d0 2000.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 34.0d0 32.0d0)
0.3849626436463866776322932129d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 37.0d0 3.499999999999999289d+01)
0.3898035054195570860969333039d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 10.0d0 1.0d-16) 2.755731922398588814734648067d-167
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1263131.0d0 1261282.3637d0)
0.04994777516935182963821362168d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 1263131.0d0 1263131.0d0)
0.500118321758657770672882362502514254d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.0d0 0.001d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 0.001d0) 0.006312353291139709793d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 1.0d0) 0.00021960835758555639171d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 2.0d0) 0.00004897691783098147880d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 0.001d0 5.0d0) 1.1509813397308608541d-06 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d0 0.001d0) 0.9990004998333749917d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d0 1.01d0) 0.3642189795715233198d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d0 10.0d0) 0.00004539992976248485154d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 10.0d0 10.01d0) 0.4566792413306589430d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 10.0d0 100.0d0) 1.1253473960842733885d-31 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1000.0d0 1000.1d0) 0.4945333598559338247d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1000.0d0 2000.0d0) 6.847349459614753180d-136 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 100.0d0 99.0d0) 0.5266956696005394d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 200.0d0 199.0d0) 0.5188414119121281d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 100.0d0 99.0d0) 0.4733043303994607d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 200.0d0 199.0d0) 0.4811585880878718d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (COMPLEMENTARY-INCOMPLETE-GAMMA 5670.0d0 4574.0d0) 3.063972328743934d-55 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 5670.0d0 4574.0d0) 1.0000000000000000d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA (+ 1.0d+06 -1.0d0) (+ 1.0d+06 -2.0d0))
0.50026596175224547004d0 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA (+ 1.0d+06 2.0d0) (+ 1.0d+06 1.0d0))
0.50026596135330304336d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d+06 (+ 1.0d+06 -2.0d0))
0.50066490399940144811d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-GAMMA 1.0d+07 (+ 1.0d+07 -2.0d0))
0.50021026104978614908d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA (/ -1048576.0d0) (/ 1048576.0d0))
13.285819596290624271d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 (/ 1048576.0d0))
13.381275128625328858d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 (/ 1048576.0d0))
1.0485617142715768655d+06 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.00001d0 0.001d0) 6.3317681434563592142d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.0001d0 0.001d0) 6.3338276439767189385d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 0.001d0) 6.3544709102510843793d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 0.001d0) 59.763880515942196981d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 0.001d0) 992.66896046923884234d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -3.5d0 0.001d0) 9.0224404490639003706d+09
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.5d0 0.001d0) 3.0083661558184815656d+30
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 0.1d0) 1.8249109609418620068d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 0.1d0) 3.4017693366916154163d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.0d0 0.1d0) 8.9490757483586989181d+08
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.5d0 0.1d0) 2.6967403834226421766d+09
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 1.0d0) 0.21928612679072766340d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 1.0d0) 0.17814771178156069019d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 1.0d0) 0.14849550677592204792d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -2.5d0 1.0d0) 0.096556648631275160264d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 10.0d0) 3.8302404656316087616d-07
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.001d0 10.0d0) 4.1470562324807320961d-06
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -0.5d0 10.0d0) 1.2609042613241570681d-06
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -1.0d0 10.0d0) 3.8302404656316087616d-07
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -10.5d0 10.0d0) 6.8404927328441566785d-17
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -100.0d0 10.0d0) 4.1238327669858313997d-107
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA -200.0d0 10.0d0) 2.1614091830529343423d-207
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.0d0 0.001d0) 6.3315393641361493320d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.001d0 0.001d0) 6.3087159394864007261d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 1.0d0 0.001d0) 0.99900049983337499167d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 10.0d0 0.001d0) 362880.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.0d0 1.0d0) 0.21938393439552027368d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.001d0 1.0d0) 0.21948181320730279613d0
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 1.0d0 1.0d0) 0.36787944117144232160d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 10.0d0 1.0d0) 362879.95956592242045d0
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 100.0d0 1.0d0) 9.3326215443944152682d+155
+TEST-TOL0+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.0d0 100.0d0) 3.6835977616820321802d-46
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 0.001d0 100.0d0) 3.7006367674063550631d-46
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 1.0d0 100.0d0) 3.7200759760208359630d-44
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 10.0d0 100.0d0) 4.0836606309106112723d-26
+TEST-TOL2+)
(ASSERT-TO-TOLERANCE (NONNORMALIZED-INCOMPLETE-GAMMA 100.0d0 100.0d0) 4.5421981208626694294d+155
+TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 1.0d-8) 19.113827924512310617d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 0.01d0) 18.420681743788563403d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 1.0d0) 18.420680743952365472d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 10.0d0) 18.420680715662683009d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d-8 1000.0d0) 18.420680669107656949d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 0.1d0) 2.9813614810376273949d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 1.0d0) 2.3025850929940456840d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 100.0d0) 1.7926462324527931217d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 0.1d0 1000.0d0) 1.5619821298353164928d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d0 1.00025d0) -0.0002499687552073570d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d0 1.01d0) -0.009950330853168082848d0 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (LOG-BETA 1.0d0 1000.0d0) -6.907755278982137052d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 100.0d0 100.0d0) -139.66525908670663927d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (LOG-BETA 100.0d0 1000.0d0) -336.4348576477366051d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (LOG-BETA 100.0d0 1.0d+8) -1482.9339185256447309d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 1.0d0) 1.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 1.001d0) 0.9990009990009990010d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 5.0d0) 0.2d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (BETA 1.0d0 100.0d0) 0.01d0 +TEST-TOL1+)
(ASSERT-TO-TOLERANCE (BETA 10.0d0 100.0d0) 2.3455339739604649879d-15 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 2.5d0 -0.1d0) -11.43621278354402041480d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 2.5d0 -1.1d0) 14.555179906328753255202d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -0.25d0 -0.1d0) -13.238937960945229110d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -1.25d0 -0.1d0) -14.298052997820847439d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 -99.1d0) -1.005181917797644630375787297d60 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 99.3d0) 0.0004474258199579694011200969001d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 100.1d0 -99.3d0) 1.328660939628876472028853747d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 1.2d0) 0.00365530364287960795444856281d0 +TEST-TOL3+)
(ASSERT-TO-TOLERANCE (BETA 100.1d0 -1.2d0) 1203.895236907821059270698160d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.1d0 -1.2d0) -3236.073671884748847700283841d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -100.001d0 0.0099d0) -853.946649365611147996495177d0 +TEST-TOL4+)
(ASSERT-TO-TOLERANCE (BETA 1.0d-32 1.5d0) 1.0d32 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA 1.0d-6 0.5d0) 1000001.386293677092419390336d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (BETA -1.5d0 0.5d0) 0.0d0 +TEST-TOL0+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 0.1d0 0.1d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 0.5d0) 0.5d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 0.1d0 1.0d0 0.5d0) 0.9330329915368074160d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 10.0d0 1.0d0 0.5d0) 0.0009765625000000000000d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 50.0d0 1.0d0 0.5d0) 8.881784197001252323d-16 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 0.1d0 0.5d0) 0.06696700846319258402d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 10.0d0 0.5d0) 0.99902343750000000000d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 50.0d0 0.5d0) 0.99999999999999911180d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 1.0d0 0.1d0) 0.10d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 2.0d0 0.1d0) 0.19d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 1.0d0 2.0d0 0.9d0) 0.99d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 50.0d0 60.0d0 0.5d0) 0.8309072939016694143d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 90.0d0 90.0d0 0.5d0) 0.5d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 500.0d0 500.0d0 0.6d0) 0.9999999999157549630d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 5000.0d0 5000.0d0 0.4d0) 4.518543727260666383d-91 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 5000.0d0 5000.0d0 0.6d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA 5000.0d0 2000.0d0 0.6d0) 8.445388773903332659d-89 +TEST-TOL5+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.1d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 1.0d0) 1.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 0.5d0) 0.675252001958389971991335d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 0.5d0) 0.324747998041610028008665d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.1d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 0.0d0) 0.0d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.1d0 -0.2d0 0.3d0) 0.7469186777964287252d0 +TEST-TOL2+)
(ASSERT-TO-TOLERANCE (INCOMPLETE-BETA -0.2d0 -0.1d0 0.3d0) 0.3995299653262016818d0 +TEST-TOL2+)
)
|
0cb69cdb7f63d13e9976e74e23ffb6921b8e2ee4602861b48968bc2511a4bed9 | gilith/hol-light | montgomery-def.ml | (* ========================================================================= *)
(* DEFINITION OF MONTGOMERY MULTIPLICATION *)
(* ========================================================================= *)
export_theory "montgomery-def";;
(* ------------------------------------------------------------------------- *)
Definition of Montgomery multiplication [ 1 ] .
(* *)
1 .
(* ------------------------------------------------------------------------- *)
let montgomery_reduce_def = new_definition
`!n r k a.
montgomery_reduce n r k a =
(a + ((a * k) MOD r) * n) DIV r`;;
export_thm montgomery_reduce_def;;
| null | https://raw.githubusercontent.com/gilith/hol-light/f3f131963f2298b4d65ee5fead6e986a4a14237a/opentheory/theories/montgomery/montgomery-def.ml | ocaml | =========================================================================
DEFINITION OF MONTGOMERY MULTIPLICATION
=========================================================================
-------------------------------------------------------------------------
------------------------------------------------------------------------- |
export_theory "montgomery-def";;
Definition of Montgomery multiplication [ 1 ] .
1 .
let montgomery_reduce_def = new_definition
`!n r k a.
montgomery_reduce n r k a =
(a + ((a * k) MOD r) * n) DIV r`;;
export_thm montgomery_reduce_def;;
|
aaacdefaebf6e1288da839ae255336e9234131c0dedbbc123ef63686d74449a7 | sbcl/sbcl | arith-combinations.pure.lisp | This software is part of the SBCL system . See the README file for
;;;; more information.
;;;;
While most of SBCL is derived from the CMU CL system , the test
;;;; files (like this one) were written from scratch after the fork
from CMU CL .
;;;;
;;;; This software is in the public domain and is provided with
;;;; absolutely no warranty. See the COPYING and CREDITS files for
;;;; more information.
(enable-test-parallelism)
(defun test-ops (ops types arguments &optional (result-types types))
(flet ((normalize-type (type)
(sb-kernel:type-specifier (sb-kernel:specifier-type type))))
(let ((types (mapcar #'normalize-type types))
(result-types (mapcar #'normalize-type result-types))
(progress 0)
(cache (make-hash-table :test #'equal)))
(loop for op in ops
do
(loop for a-type in types
do
(loop for b-type in types
do
(loop for result-type in result-types
do
(loop for a in arguments
when (typep a a-type)
do
(loop for b in arguments
for result = (funcall op a b)
when (typep b b-type)
do
(loop for lambda in (list `(lambda (a b)
(declare (,a-type a)
(,b-type b))
(the ,result-type (,op a b)))
`(lambda (a)
(declare (,a-type a))
(the ,result-type (,op a ,b)))
`(lambda (b)
(declare (,b-type b))
(the ,result-type (,op ,a b))))
for args in (list (list a b)
(list a)
(list b))
for fun = (or (gethash lambda cache)
(setf (gethash lambda cache)
(checked-compile lambda :allow-warnings t)))
do
(when (and (zerop (mod (incf progress) (or #+(or arm x86) 100 10000)))
(interactive-stream-p *standard-output*))
(write-char #\Return)
(write progress)
(write-char #\Space)
(write (hash-table-count cache))
(force-output))
(handler-case
(apply fun args)
(error (c)
(if (typep result result-type)
(error "~a => ~a /= ~a" (list* lambda args) c result)
(let ((x (type-error-datum c))
(type (type-error-expected-type c)))
(cond ((not (equal type result-type))
(error "~a => type error ~a /= ~a" (list* lambda args)
c
result-type))
((not (eql x result))
(error "~a => type error ~a /= ~a" (list* lambda args)
c
x))))))
(:no-error (x)
(if (typep result result-type)
(unless (eql x result)
(error "~a = ~a /= ~a" (list* lambda args) x result))
(error "~a => ~a /= type error" (list* lambda args) x))))))))))))))
(with-test (:name :overflow-arith)
(test-ops '(+ - *)
`(t fixnum (integer ,(- (expt 2 sb-vm:n-word-bits) 10)
,(- (expt 2 sb-vm:n-word-bits) 1))
(signed-byte ,sb-vm:n-word-bits)
(unsigned-byte ,sb-vm:n-word-bits)
(signed-byte 8)
(unsigned-byte 8))
(list 0 1 2 3 4 -1 -2 -3 -4
(- (expt 2 sb-vm:n-word-bits) 1)
(- (expt 2 sb-vm:n-word-bits) 5)
(- (expt 2 (1- sb-vm:n-word-bits)) 1)
(- (expt 2 (1- sb-vm:n-word-bits)) 5)
(- (expt 2 (1- sb-vm:n-word-bits)))
(- 10 (expt 2 (1- sb-vm:n-word-bits)))
(expt 2 (1- sb-vm:n-word-bits))
most-positive-fixnum
most-negative-fixnum
(1- most-positive-fixnum)
(1+ most-negative-fixnum)
(floor most-positive-fixnum 2)
(floor most-negative-fixnum 2))))
(with-test (:name :fixnum-integer-cmp)
(test-ops '(> <)
`(t fixnum
integer
bignum
(integer ,(- (expt 2 sb-vm:n-word-bits) 10)
,(- (expt 2 sb-vm:n-word-bits) 1))
(signed-byte ,sb-vm:n-word-bits)
(unsigned-byte ,sb-vm:n-word-bits)
(signed-byte 8)
(unsigned-byte 8))
(list 0 1 2 3 4 -1 -2 -3 -4
(- (expt 2 sb-vm:n-word-bits) 1)
(- (expt 2 sb-vm:n-word-bits) 5)
(- (expt 2 (1- sb-vm:n-word-bits)) 1)
(- (expt 2 (1- sb-vm:n-word-bits)) 5)
(- (expt 2 (1- sb-vm:n-word-bits)))
(- 10 (expt 2 (1- sb-vm:n-word-bits)))
(expt 2 (1- sb-vm:n-word-bits))
most-positive-fixnum
most-negative-fixnum
(1- most-positive-fixnum)
(1+ most-negative-fixnum)
(floor most-positive-fixnum 2)
(floor most-negative-fixnum 2))
'(t)))
(with-test (:name :integer-ratio-float-compare)
(test-ops '(> <)
`(t fixnum
integer
bignum
(integer ,(- (expt 2 sb-vm:n-word-bits) 10)
,(- (expt 2 sb-vm:n-word-bits) 1))
(signed-byte ,sb-vm:n-word-bits)
(unsigned-byte ,sb-vm:n-word-bits)
(signed-byte 8)
(unsigned-byte 8))
(list 0 1 2 3 4 -1 -2 -3 -4
(- (expt 2 sb-vm:n-word-bits) 1)
(- (expt 2 sb-vm:n-word-bits) 5)
(- (expt 2 (1- sb-vm:n-word-bits)) 1)
(- (expt 2 (1- sb-vm:n-word-bits)) 5)
(- (expt 2 (1- sb-vm:n-word-bits)))
(- 10 (expt 2 (1- sb-vm:n-word-bits)))
(expt 2 (1- sb-vm:n-word-bits))
most-positive-fixnum
most-negative-fixnum
(1- most-positive-fixnum)
(1+ most-negative-fixnum)
(floor most-positive-fixnum 2)
(floor most-negative-fixnum 2))
'(t)))
| null | https://raw.githubusercontent.com/sbcl/sbcl/e7d6d21aef0f32e50e5a53c8ab6642300433f0bc/tests/arith-combinations.pure.lisp | lisp | more information.
files (like this one) were written from scratch after the fork
This software is in the public domain and is provided with
absolutely no warranty. See the COPYING and CREDITS files for
more information. | This software is part of the SBCL system . See the README file for
While most of SBCL is derived from the CMU CL system , the test
from CMU CL .
(enable-test-parallelism)
(defun test-ops (ops types arguments &optional (result-types types))
(flet ((normalize-type (type)
(sb-kernel:type-specifier (sb-kernel:specifier-type type))))
(let ((types (mapcar #'normalize-type types))
(result-types (mapcar #'normalize-type result-types))
(progress 0)
(cache (make-hash-table :test #'equal)))
(loop for op in ops
do
(loop for a-type in types
do
(loop for b-type in types
do
(loop for result-type in result-types
do
(loop for a in arguments
when (typep a a-type)
do
(loop for b in arguments
for result = (funcall op a b)
when (typep b b-type)
do
(loop for lambda in (list `(lambda (a b)
(declare (,a-type a)
(,b-type b))
(the ,result-type (,op a b)))
`(lambda (a)
(declare (,a-type a))
(the ,result-type (,op a ,b)))
`(lambda (b)
(declare (,b-type b))
(the ,result-type (,op ,a b))))
for args in (list (list a b)
(list a)
(list b))
for fun = (or (gethash lambda cache)
(setf (gethash lambda cache)
(checked-compile lambda :allow-warnings t)))
do
(when (and (zerop (mod (incf progress) (or #+(or arm x86) 100 10000)))
(interactive-stream-p *standard-output*))
(write-char #\Return)
(write progress)
(write-char #\Space)
(write (hash-table-count cache))
(force-output))
(handler-case
(apply fun args)
(error (c)
(if (typep result result-type)
(error "~a => ~a /= ~a" (list* lambda args) c result)
(let ((x (type-error-datum c))
(type (type-error-expected-type c)))
(cond ((not (equal type result-type))
(error "~a => type error ~a /= ~a" (list* lambda args)
c
result-type))
((not (eql x result))
(error "~a => type error ~a /= ~a" (list* lambda args)
c
x))))))
(:no-error (x)
(if (typep result result-type)
(unless (eql x result)
(error "~a = ~a /= ~a" (list* lambda args) x result))
(error "~a => ~a /= type error" (list* lambda args) x))))))))))))))
(with-test (:name :overflow-arith)
(test-ops '(+ - *)
`(t fixnum (integer ,(- (expt 2 sb-vm:n-word-bits) 10)
,(- (expt 2 sb-vm:n-word-bits) 1))
(signed-byte ,sb-vm:n-word-bits)
(unsigned-byte ,sb-vm:n-word-bits)
(signed-byte 8)
(unsigned-byte 8))
(list 0 1 2 3 4 -1 -2 -3 -4
(- (expt 2 sb-vm:n-word-bits) 1)
(- (expt 2 sb-vm:n-word-bits) 5)
(- (expt 2 (1- sb-vm:n-word-bits)) 1)
(- (expt 2 (1- sb-vm:n-word-bits)) 5)
(- (expt 2 (1- sb-vm:n-word-bits)))
(- 10 (expt 2 (1- sb-vm:n-word-bits)))
(expt 2 (1- sb-vm:n-word-bits))
most-positive-fixnum
most-negative-fixnum
(1- most-positive-fixnum)
(1+ most-negative-fixnum)
(floor most-positive-fixnum 2)
(floor most-negative-fixnum 2))))
(with-test (:name :fixnum-integer-cmp)
(test-ops '(> <)
`(t fixnum
integer
bignum
(integer ,(- (expt 2 sb-vm:n-word-bits) 10)
,(- (expt 2 sb-vm:n-word-bits) 1))
(signed-byte ,sb-vm:n-word-bits)
(unsigned-byte ,sb-vm:n-word-bits)
(signed-byte 8)
(unsigned-byte 8))
(list 0 1 2 3 4 -1 -2 -3 -4
(- (expt 2 sb-vm:n-word-bits) 1)
(- (expt 2 sb-vm:n-word-bits) 5)
(- (expt 2 (1- sb-vm:n-word-bits)) 1)
(- (expt 2 (1- sb-vm:n-word-bits)) 5)
(- (expt 2 (1- sb-vm:n-word-bits)))
(- 10 (expt 2 (1- sb-vm:n-word-bits)))
(expt 2 (1- sb-vm:n-word-bits))
most-positive-fixnum
most-negative-fixnum
(1- most-positive-fixnum)
(1+ most-negative-fixnum)
(floor most-positive-fixnum 2)
(floor most-negative-fixnum 2))
'(t)))
(with-test (:name :integer-ratio-float-compare)
(test-ops '(> <)
`(t fixnum
integer
bignum
(integer ,(- (expt 2 sb-vm:n-word-bits) 10)
,(- (expt 2 sb-vm:n-word-bits) 1))
(signed-byte ,sb-vm:n-word-bits)
(unsigned-byte ,sb-vm:n-word-bits)
(signed-byte 8)
(unsigned-byte 8))
(list 0 1 2 3 4 -1 -2 -3 -4
(- (expt 2 sb-vm:n-word-bits) 1)
(- (expt 2 sb-vm:n-word-bits) 5)
(- (expt 2 (1- sb-vm:n-word-bits)) 1)
(- (expt 2 (1- sb-vm:n-word-bits)) 5)
(- (expt 2 (1- sb-vm:n-word-bits)))
(- 10 (expt 2 (1- sb-vm:n-word-bits)))
(expt 2 (1- sb-vm:n-word-bits))
most-positive-fixnum
most-negative-fixnum
(1- most-positive-fixnum)
(1+ most-negative-fixnum)
(floor most-positive-fixnum 2)
(floor most-negative-fixnum 2))
'(t)))
|
6645096c05bd09b7f27b8c33c4ae434d1083a0288324f89240ac9ee1c28e6835 | ryanpbrewster/haskell | P031.hs | module Problems.P031
( solve
) where
- In England the currency is made up of pound , £ , and pence , p , and there are
- eight coins in general circulation :
-
- 1p , 2p , 5p , 10p , 20p , 50p , £ 1 ( 100p ) and £ 2 ( 200p ) .
-
- It is possible to make £ 2 in the following way :
-
- 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
-
- How many different ways can £ 2 be made using any number of coins ?
- In England the currency is made up of pound, £, and pence, p, and there are
- eight coins in general circulation:
-
- 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
-
- It is possible to make £2 in the following way:
-
- 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
-
- How many different ways can £2 be made using any number of coins?
-}
- Consider the notationally simpler problem :
- How many ways can you construct the number 10 out of the set
- { 1,2,3,4,5 }
- Clearly by using the first 0 elements ( i.e. the set { } ) you can
- make [ 0 .. 10 ] in
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,0,0,0,0,0,0,0,0,0,0 }
- ways . That is , you can only make the number 0 . Now , suppose we add
- in the next element , so we have { 1 } . We can now make
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,1,1,1,1,1,1,1,1,1,1 }
-
- Now suppose we add in 2 .
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,1,2,2,3,3,4,4,5,5,6 }
-
- Now 3 .
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,1,2,3,4,5,7,8,10,12,14 }
- Consider the notationally simpler problem:
- How many ways can you construct the number 10 out of the set
- {1,2,3,4,5}
- Clearly by using the first 0 elements (i.e. the set {}) you can
- make [0..10] in
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,0,0,0,0,0,0,0,0,0,0}
- ways. That is, you can only make the number 0. Now, suppose we add
- in the next element, so we have {1}. We can now make
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,1,1,1,1,1,1,1,1,1,1}
-
- Now suppose we add in 2.
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,1,2,2,3,3,4,4,5,5,6}
-
- Now 3.
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,1,2,3,4,5,7,8,10,12,14}
-}
solve :: String
solve = show solveProblem
can only make 0
ways (x:xs) =
let others = ways xs
(start, rest) = splitAt x others -- x doesn't matter for [0..x-1]
ans = start ++ zipWith (+) rest ans
in ans
solveProblem =
let w = ways [1, 2, 5, 10, 20, 50, 100, 200]
in w !! 200
| null | https://raw.githubusercontent.com/ryanpbrewster/haskell/6edd0afe234008a48b4871032dedfd143ca6e412/project-euler/src/Problems/P031.hs | haskell | x doesn't matter for [0..x-1] | module Problems.P031
( solve
) where
- In England the currency is made up of pound , £ , and pence , p , and there are
- eight coins in general circulation :
-
- 1p , 2p , 5p , 10p , 20p , 50p , £ 1 ( 100p ) and £ 2 ( 200p ) .
-
- It is possible to make £ 2 in the following way :
-
- 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
-
- How many different ways can £ 2 be made using any number of coins ?
- In England the currency is made up of pound, £, and pence, p, and there are
- eight coins in general circulation:
-
- 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
-
- It is possible to make £2 in the following way:
-
- 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
-
- How many different ways can £2 be made using any number of coins?
-}
- Consider the notationally simpler problem :
- How many ways can you construct the number 10 out of the set
- { 1,2,3,4,5 }
- Clearly by using the first 0 elements ( i.e. the set { } ) you can
- make [ 0 .. 10 ] in
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,0,0,0,0,0,0,0,0,0,0 }
- ways . That is , you can only make the number 0 . Now , suppose we add
- in the next element , so we have { 1 } . We can now make
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,1,1,1,1,1,1,1,1,1,1 }
-
- Now suppose we add in 2 .
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,1,2,2,3,3,4,4,5,5,6 }
-
- Now 3 .
- { 0,1,2,3,4,5,6,7,8,9,10 }
- { 1,1,2,3,4,5,7,8,10,12,14 }
- Consider the notationally simpler problem:
- How many ways can you construct the number 10 out of the set
- {1,2,3,4,5}
- Clearly by using the first 0 elements (i.e. the set {}) you can
- make [0..10] in
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,0,0,0,0,0,0,0,0,0,0}
- ways. That is, you can only make the number 0. Now, suppose we add
- in the next element, so we have {1}. We can now make
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,1,1,1,1,1,1,1,1,1,1}
-
- Now suppose we add in 2.
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,1,2,2,3,3,4,4,5,5,6}
-
- Now 3.
- {0,1,2,3,4,5,6,7,8,9,10}
- {1,1,2,3,4,5,7,8,10,12,14}
-}
solve :: String
solve = show solveProblem
can only make 0
ways (x:xs) =
let others = ways xs
ans = start ++ zipWith (+) rest ans
in ans
solveProblem =
let w = ways [1, 2, 5, 10, 20, 50, 100, 200]
in w !! 200
|
706f5b681d83fa0926a41e623a11acb0f11b638391813ef17d3ff917a68ca947 | avsm/platform | opamSwitchAction.ml | (**************************************************************************)
(* *)
Copyright 2012 - 2015 OCamlPro
Copyright 2012 INRIA
(* *)
(* All rights reserved. This file is distributed under the terms of the *)
GNU Lesser General Public License version 2.1 , with the special
(* exception on linking described in the file LICENSE. *)
(* *)
(**************************************************************************)
open OpamTypes
open OpamStateTypes
open OpamPackage.Set.Op
let log fmt = OpamConsole.log "SWACT" fmt
let slog = OpamConsole.slog
let gen_switch_config root ?(synopsis="") ?repos _switch =
let vars =
List.map (fun (s,p) -> OpamVariable.of_string s, S p) [
("user" ,
try (Unix.getpwuid (Unix.getuid ())).Unix.pw_name
with Not_found -> "user");
("group",
try (Unix.getgrgid (Unix.getgid ())).Unix.gr_name
with Not_found -> "group");
]
in
{ OpamFile.Switch_config.
opam_version = OpamVersion.current_nopatch;
synopsis;
variables = vars;
paths = [];
opam_root = Some root;
repos;
wrappers = OpamFile.Wrappers.empty;
env = []; }
let install_switch_config root switch config =
log "install_switch_config switch=%a" (slog OpamSwitch.to_string) switch;
OpamFile.Switch_config.write
(OpamPath.Switch.switch_config root switch)
config
let create_empty_switch gt ?synopsis ?repos switch =
log "create_empty_switch at %a" (slog OpamSwitch.to_string) switch;
let root = gt.root in
let switch_dir = OpamPath.Switch.root root switch in
(* Do some clean-up if necessary *)
if OpamFilename.exists_dir switch_dir then
failwith (Printf.sprintf "Directory %s already exists"
(OpamFilename.Dir.to_string switch_dir));
try
(* Create base directories *)
OpamFilename.mkdir switch_dir;
let config = gen_switch_config root ?synopsis ?repos switch in
OpamFilename.mkdir (OpamPath.Switch.lib_dir root switch config);
OpamFilename.mkdir (OpamPath.Switch.stublibs root switch config);
OpamFilename.mkdir (OpamPath.Switch.toplevel root switch config);
OpamFilename.mkdir (OpamPath.Switch.build_dir root switch);
OpamFilename.mkdir (OpamPath.Switch.bin root switch config);
OpamFilename.mkdir (OpamPath.Switch.sbin root switch config);
OpamFilename.mkdir (OpamPath.Switch.doc_dir root switch config);
OpamFilename.mkdir (OpamPath.Switch.man_dir root switch config);
OpamFilename.mkdir (OpamPath.Switch.install_dir root switch);
OpamFilename.mkdir (OpamPath.Switch.config_dir root switch);
List.iter (fun num ->
OpamFilename.mkdir (OpamPath.Switch.man_dir ~num root switch config)
) ["1";"1M";"2";"3";"4";"5";"6";"7";"9"];
install_switch_config root switch config;
let root_config =
OpamFile.Config.with_installed_switches
(switch::OpamFile.Config.installed_switches gt.config)
gt.config
in
let gt = { gt with config = root_config } in
OpamGlobalState.write gt;
gt
with e ->
if not (OpamConsole.debug ()) then
OpamFilename.rmdir switch_dir;
raise e
let write_selections st =
if not OpamStateConfig.(!r.dryrun) then
let f = OpamPath.Switch.selections st.switch_global.root st.switch in
let env = OpamPath.Switch.environment st.switch_global.root st.switch in
OpamFile.SwitchSelections.write f (OpamSwitchState.selections st);
OpamFile.Environment.write env (OpamEnv.compute_updates st)
let add_to_reinstall st ~unpinned_only packages =
log "add-to-reinstall unpinned_only:%b packages:%a" unpinned_only
(slog OpamPackage.Set.to_string) packages;
let root = st.switch_global.root in
let packages =
if unpinned_only then
OpamPackage.Set.filter
(fun nv -> not (OpamPackage.has_name st.pinned nv.name))
packages
else packages
in
let reinstall_file = OpamPath.Switch.reinstall root st.switch in
let current_reinstall = OpamFile.PkgList.safe_read reinstall_file in
let add_reinst_packages =
OpamPackage.packages_of_names st.installed
(OpamPackage.names_of_packages packages)
in
let reinstall =
current_reinstall ++ add_reinst_packages
in
if OpamPackage.Set.equal current_reinstall reinstall then ()
else if OpamPackage.Set.is_empty reinstall then
OpamFilename.remove (OpamFile.filename reinstall_file)
else
OpamFile.PkgList.write reinstall_file reinstall;
{ st with reinstall = st.reinstall ++ add_reinst_packages }
let set_current_switch lock gt ?rt switch =
if OpamSwitch.is_external switch then
OpamConsole.error_and_exit `Bad_arguments
"Can not set external switch '%s' globally. To set it in the current \
shell use:\n %s"
(OpamSwitch.to_string switch)
(OpamEnv.eval_string gt ~set_opamswitch:true (Some switch));
let config = OpamFile.Config.with_switch switch gt.config in
let gt = { gt with config } in
OpamGlobalState.write gt;
let rt = match rt with
| Some rt -> { rt with repos_global = gt }
| None -> OpamRepositoryState.load `Lock_none gt
in
let st = OpamSwitchState.load lock gt rt switch in
OpamEnv.write_dynamic_init_scripts st;
st
let install_metadata st nv =
let opam = OpamSwitchState.opam st nv in
OpamFile.OPAM.write
(OpamPath.Switch.installed_opam st.switch_global.root st.switch nv)
opam;
List.iter (fun (f, rel_path, _hash) ->
let dst =
OpamFilename.create
(OpamPath.Switch.installed_opam_files_dir
st.switch_global.root st.switch nv)
rel_path
in
OpamFilename.mkdir (OpamFilename.dirname dst);
OpamFilename.copy ~src:f ~dst)
(OpamFile.OPAM.get_extra_files opam)
let remove_metadata st packages =
OpamPackage.Set.iter (fun nv ->
OpamFilename.rmdir
(OpamPath.Switch.installed_package_dir
st.switch_global.root st.switch nv))
packages
let update_switch_state ?installed ?installed_roots ?reinstall ?pinned st =
let open OpamStd.Option.Op in
let open OpamPackage.Set.Op in
let installed = installed +! st.installed in
let reinstall0 = st.reinstall in
let reinstall = (reinstall +! reinstall0) %% installed in
let compiler_packages =
if OpamPackage.Set.is_empty (st.compiler_packages -- installed) then
st.compiler_packages
else (* adjust version of installed compiler packages *)
let names = OpamPackage.names_of_packages st.compiler_packages in
let installed_base = OpamPackage.packages_of_names installed names in
installed_base ++
(* keep version of uninstalled compiler packages *)
OpamPackage.packages_of_names st.compiler_packages
(OpamPackage.Name.Set.diff names
(OpamPackage.names_of_packages installed_base))
in
let old_selections = OpamSwitchState.selections st in
let st =
{ st with
installed;
installed_roots = installed_roots +! st.installed_roots;
reinstall;
pinned = pinned +! st.pinned;
compiler_packages; }
in
if not OpamStateConfig.(!r.dryrun) then (
if OpamSwitchState.selections st <> old_selections then write_selections st;
if not (OpamPackage.Set.equal reinstall0 reinstall) then
OpamFile.PkgList.write
(OpamPath.Switch.reinstall st.switch_global.root st.switch)
(OpamPackage.Set.filter (OpamSwitchState.is_dev_package st) reinstall)
);
st
let add_to_installed st ?(root=false) nv =
let st =
update_switch_state st
~installed:(OpamPackage.Set.add nv st.installed)
~reinstall:(OpamPackage.Set.remove nv st.reinstall)
~installed_roots:
(let roots =
OpamPackage.Set.filter (fun nv1 -> nv1.name <> nv.name)
st.installed_roots
in
if root then OpamPackage.Set.add nv roots else st.installed_roots)
in
let opam = OpamSwitchState.opam st nv in
let conf =
OpamFile.Dot_config.safe_read
(OpamPath.Switch.config st.switch_global.root st.switch nv.name)
in
let st = { st with conf_files = OpamPackage.Map.add nv conf st.conf_files } in
if not OpamStateConfig.(!r.dryrun) then (
install_metadata st nv;
if OpamFile.OPAM.env opam <> [] &&
OpamSwitchState.is_switch_globally_set st
then
OpamEnv.write_dynamic_init_scripts st;
);
st
let remove_from_installed ?(keep_as_root=false) st nv =
let rm = OpamPackage.Set.remove nv in
let st =
update_switch_state st
~installed:(rm st.installed)
?installed_roots:(if keep_as_root then None
else Some (rm st.installed_roots))
~reinstall:(rm st.reinstall)
in
let has_setenv =
match OpamStd.Option.map OpamFile.OPAM.env (OpamSwitchState.opam_opt st nv)
with Some (_::_) -> true | _ -> false
in
if not OpamStateConfig.(!r.dryrun) &&
has_setenv && OpamSwitchState.is_switch_globally_set st
then
(* note: don't remove_metadata just yet *)
OpamEnv.write_dynamic_init_scripts st;
{ st with conf_files = OpamPackage.Map.remove nv st.conf_files }
| null | https://raw.githubusercontent.com/avsm/platform/b254e3c6b60f3c0c09dfdcde92eb1abdc267fa1c/duniverse/opam-client.2.0.5%2Bdune/src/state/opamSwitchAction.ml | ocaml | ************************************************************************
All rights reserved. This file is distributed under the terms of the
exception on linking described in the file LICENSE.
************************************************************************
Do some clean-up if necessary
Create base directories
adjust version of installed compiler packages
keep version of uninstalled compiler packages
note: don't remove_metadata just yet | Copyright 2012 - 2015 OCamlPro
Copyright 2012 INRIA
GNU Lesser General Public License version 2.1 , with the special
open OpamTypes
open OpamStateTypes
open OpamPackage.Set.Op
let log fmt = OpamConsole.log "SWACT" fmt
let slog = OpamConsole.slog
let gen_switch_config root ?(synopsis="") ?repos _switch =
let vars =
List.map (fun (s,p) -> OpamVariable.of_string s, S p) [
("user" ,
try (Unix.getpwuid (Unix.getuid ())).Unix.pw_name
with Not_found -> "user");
("group",
try (Unix.getgrgid (Unix.getgid ())).Unix.gr_name
with Not_found -> "group");
]
in
{ OpamFile.Switch_config.
opam_version = OpamVersion.current_nopatch;
synopsis;
variables = vars;
paths = [];
opam_root = Some root;
repos;
wrappers = OpamFile.Wrappers.empty;
env = []; }
let install_switch_config root switch config =
log "install_switch_config switch=%a" (slog OpamSwitch.to_string) switch;
OpamFile.Switch_config.write
(OpamPath.Switch.switch_config root switch)
config
let create_empty_switch gt ?synopsis ?repos switch =
log "create_empty_switch at %a" (slog OpamSwitch.to_string) switch;
let root = gt.root in
let switch_dir = OpamPath.Switch.root root switch in
if OpamFilename.exists_dir switch_dir then
failwith (Printf.sprintf "Directory %s already exists"
(OpamFilename.Dir.to_string switch_dir));
try
OpamFilename.mkdir switch_dir;
let config = gen_switch_config root ?synopsis ?repos switch in
OpamFilename.mkdir (OpamPath.Switch.lib_dir root switch config);
OpamFilename.mkdir (OpamPath.Switch.stublibs root switch config);
OpamFilename.mkdir (OpamPath.Switch.toplevel root switch config);
OpamFilename.mkdir (OpamPath.Switch.build_dir root switch);
OpamFilename.mkdir (OpamPath.Switch.bin root switch config);
OpamFilename.mkdir (OpamPath.Switch.sbin root switch config);
OpamFilename.mkdir (OpamPath.Switch.doc_dir root switch config);
OpamFilename.mkdir (OpamPath.Switch.man_dir root switch config);
OpamFilename.mkdir (OpamPath.Switch.install_dir root switch);
OpamFilename.mkdir (OpamPath.Switch.config_dir root switch);
List.iter (fun num ->
OpamFilename.mkdir (OpamPath.Switch.man_dir ~num root switch config)
) ["1";"1M";"2";"3";"4";"5";"6";"7";"9"];
install_switch_config root switch config;
let root_config =
OpamFile.Config.with_installed_switches
(switch::OpamFile.Config.installed_switches gt.config)
gt.config
in
let gt = { gt with config = root_config } in
OpamGlobalState.write gt;
gt
with e ->
if not (OpamConsole.debug ()) then
OpamFilename.rmdir switch_dir;
raise e
let write_selections st =
if not OpamStateConfig.(!r.dryrun) then
let f = OpamPath.Switch.selections st.switch_global.root st.switch in
let env = OpamPath.Switch.environment st.switch_global.root st.switch in
OpamFile.SwitchSelections.write f (OpamSwitchState.selections st);
OpamFile.Environment.write env (OpamEnv.compute_updates st)
let add_to_reinstall st ~unpinned_only packages =
log "add-to-reinstall unpinned_only:%b packages:%a" unpinned_only
(slog OpamPackage.Set.to_string) packages;
let root = st.switch_global.root in
let packages =
if unpinned_only then
OpamPackage.Set.filter
(fun nv -> not (OpamPackage.has_name st.pinned nv.name))
packages
else packages
in
let reinstall_file = OpamPath.Switch.reinstall root st.switch in
let current_reinstall = OpamFile.PkgList.safe_read reinstall_file in
let add_reinst_packages =
OpamPackage.packages_of_names st.installed
(OpamPackage.names_of_packages packages)
in
let reinstall =
current_reinstall ++ add_reinst_packages
in
if OpamPackage.Set.equal current_reinstall reinstall then ()
else if OpamPackage.Set.is_empty reinstall then
OpamFilename.remove (OpamFile.filename reinstall_file)
else
OpamFile.PkgList.write reinstall_file reinstall;
{ st with reinstall = st.reinstall ++ add_reinst_packages }
let set_current_switch lock gt ?rt switch =
if OpamSwitch.is_external switch then
OpamConsole.error_and_exit `Bad_arguments
"Can not set external switch '%s' globally. To set it in the current \
shell use:\n %s"
(OpamSwitch.to_string switch)
(OpamEnv.eval_string gt ~set_opamswitch:true (Some switch));
let config = OpamFile.Config.with_switch switch gt.config in
let gt = { gt with config } in
OpamGlobalState.write gt;
let rt = match rt with
| Some rt -> { rt with repos_global = gt }
| None -> OpamRepositoryState.load `Lock_none gt
in
let st = OpamSwitchState.load lock gt rt switch in
OpamEnv.write_dynamic_init_scripts st;
st
let install_metadata st nv =
let opam = OpamSwitchState.opam st nv in
OpamFile.OPAM.write
(OpamPath.Switch.installed_opam st.switch_global.root st.switch nv)
opam;
List.iter (fun (f, rel_path, _hash) ->
let dst =
OpamFilename.create
(OpamPath.Switch.installed_opam_files_dir
st.switch_global.root st.switch nv)
rel_path
in
OpamFilename.mkdir (OpamFilename.dirname dst);
OpamFilename.copy ~src:f ~dst)
(OpamFile.OPAM.get_extra_files opam)
let remove_metadata st packages =
OpamPackage.Set.iter (fun nv ->
OpamFilename.rmdir
(OpamPath.Switch.installed_package_dir
st.switch_global.root st.switch nv))
packages
let update_switch_state ?installed ?installed_roots ?reinstall ?pinned st =
let open OpamStd.Option.Op in
let open OpamPackage.Set.Op in
let installed = installed +! st.installed in
let reinstall0 = st.reinstall in
let reinstall = (reinstall +! reinstall0) %% installed in
let compiler_packages =
if OpamPackage.Set.is_empty (st.compiler_packages -- installed) then
st.compiler_packages
let names = OpamPackage.names_of_packages st.compiler_packages in
let installed_base = OpamPackage.packages_of_names installed names in
installed_base ++
OpamPackage.packages_of_names st.compiler_packages
(OpamPackage.Name.Set.diff names
(OpamPackage.names_of_packages installed_base))
in
let old_selections = OpamSwitchState.selections st in
let st =
{ st with
installed;
installed_roots = installed_roots +! st.installed_roots;
reinstall;
pinned = pinned +! st.pinned;
compiler_packages; }
in
if not OpamStateConfig.(!r.dryrun) then (
if OpamSwitchState.selections st <> old_selections then write_selections st;
if not (OpamPackage.Set.equal reinstall0 reinstall) then
OpamFile.PkgList.write
(OpamPath.Switch.reinstall st.switch_global.root st.switch)
(OpamPackage.Set.filter (OpamSwitchState.is_dev_package st) reinstall)
);
st
let add_to_installed st ?(root=false) nv =
let st =
update_switch_state st
~installed:(OpamPackage.Set.add nv st.installed)
~reinstall:(OpamPackage.Set.remove nv st.reinstall)
~installed_roots:
(let roots =
OpamPackage.Set.filter (fun nv1 -> nv1.name <> nv.name)
st.installed_roots
in
if root then OpamPackage.Set.add nv roots else st.installed_roots)
in
let opam = OpamSwitchState.opam st nv in
let conf =
OpamFile.Dot_config.safe_read
(OpamPath.Switch.config st.switch_global.root st.switch nv.name)
in
let st = { st with conf_files = OpamPackage.Map.add nv conf st.conf_files } in
if not OpamStateConfig.(!r.dryrun) then (
install_metadata st nv;
if OpamFile.OPAM.env opam <> [] &&
OpamSwitchState.is_switch_globally_set st
then
OpamEnv.write_dynamic_init_scripts st;
);
st
let remove_from_installed ?(keep_as_root=false) st nv =
let rm = OpamPackage.Set.remove nv in
let st =
update_switch_state st
~installed:(rm st.installed)
?installed_roots:(if keep_as_root then None
else Some (rm st.installed_roots))
~reinstall:(rm st.reinstall)
in
let has_setenv =
match OpamStd.Option.map OpamFile.OPAM.env (OpamSwitchState.opam_opt st nv)
with Some (_::_) -> true | _ -> false
in
if not OpamStateConfig.(!r.dryrun) &&
has_setenv && OpamSwitchState.is_switch_globally_set st
then
OpamEnv.write_dynamic_init_scripts st;
{ st with conf_files = OpamPackage.Map.remove nv st.conf_files }
|
984e7d5dc247e6db08d70eca91123a2ecab468971c46affe1884c5902b2b2875 | craigl64/clim-ccl | callbacks.lisp | ;; -*- mode: common-lisp; package: tk -*-
;; See the file LICENSE for the full license governing this code.
;;
(in-package :tk)
(defun has-callbacks-p (w name)
(> (xt_has_callbacks w name) 1))
(defun-foreign-callable callback-handler ((widget :foreign-address)
(client-data :foreign-address)
(call-data :foreign-address))
(callback-handler-1 widget client-data call-data))
(defun callback-handler-1 (address client-data call-data)
(let* ((widget (find-object-from-address address))
(callback-info (or (assoc client-data (widget-callback-data widget))
(error "Cannot find callback info ~S,~S"
widget client-data))))
(destructuring-bind
(ignore (fn &rest args) &optional type)
callback-info
(declare (ignore ignore))
(apply fn
widget
(append (multiple-value-list (spread-callback-data widget call-data type))
args)))
0))
(defmethod spread-callback-data (widget call-data (type (eql nil)))
(declare (ignore widget call-data))
(values))
(defvar *callback-handler-address* nil)
(defun add-callback (widget callback-name function &rest args)
(multiple-value-bind
(name type)
(convert-callback-name callback-name)
(xt_add_callback
widget
name
(or *callback-handler-address*
(setq *callback-handler-address* (register-foreign-callable 'callback-handler)))
(caar (push
(list (new-callback-id) (cons function args) type)
(widget-callback-data widget))))))
(defun-foreign-callable create-popup-child-proc-function ((widget :foreign-address))
(create-popup-child-proc-function-1 widget))
(defun create-popup-child-proc-function-1 (widget)
(let* ((widget (find-object-from-address widget))
(function (or (cdr (assoc :create-popup-child-proc (widget-callback-data widget)))
(error "cannot find create-popup-childp-proc ~S" widget))))
(funcall function widget)))
(defvar *create-popup-child-proc-function-address* nil)
(defun (setf widget-create-popup-child-proc) (function widget)
(push
(cons :create-popup-child-proc function)
(widget-callback-data widget))
(set-values widget :create-popup-child-proc
(or *create-popup-child-proc-function-address*
(setq *create-popup-child-proc-function-address*
(ff:register-foreign-callable 'create-popup-child-proc-function))))
function)
(defun remove-all-callbacks (widget callback-name)
(xt_remove_all_callbacks widget (convert-callback-name callback-name)))
(defvar *callback-ids* 0)
(defun new-callback-id ()
(incf *callback-ids*))
(defun process-callback-alist-component (x)
(destructuring-bind (name &optional type) x
(list (lispify-tk-name name :package :keyword)
name
nil;; malloc cache
type)))
(defparameter *callback-name-alist*
(mapcar #'process-callback-alist-component
'(
("activateCallback" :activate)
("armCallback")
("disarmCallback")
("popupCallback")
("popdownCallback")
("helpCallback")
("decrementCallback")
("dragCallback")
("incrementCallback")
("pageDecrementCallback")
("pageIncrementCallback")
("toBottomCallback")
("toTopCallback")
("focusCallback")
("losingFocusCallback")
("modifyVerifyCallback" :modify-verify)
("valueChangedCallback")
("noMatchCallback")
("cancelCallback")
("applyCallback")
("okCallback")
("browseSelectionCallback" :single-selection)
("singleSelectionCallback" :single-selection)
("defaultActionCallback")
("extendedSelectionCallback")
("multipleSelectionCallback" :multiple-selection)
("entryCallback")
("mapCallback")
("unmapCallback")
("cascadingCallback")
("commandChangedCallback")
("commandEnteredCallback")
("exposeCallback" drawing-area)
("inputCallback" drawing-area)
("resizeCallback" drawing-area)
("destroyCallback")
("gainPrimaryCallback")
("losePrimaryCallback")
;; Motif Callbacks
;; OpenLook Callbacks
("sliderMoved" slider-moved)
("select")
("unselect")
("postModifyNotification")
("userMakeCurrent" ol-list-item-make-current)
)))
(defun convert-callback-name (x)
(let ((z (assoc x *callback-name-alist*)))
(unless z (error "No such Callback: ~S" x))
(values (or (third z)
(setf (third z)
(lisp-string-to-string8 (second z))))
(fourth z))))
(defmethod spread-callback-data (widget data (type (eql :activate)))
(declare (ignore widget))
(x-push-button-callback-struct-click-count data))
(defmethod spread-callback-data (widget data (type (eql :modify-verify)))
(declare (ignore widget))
data)
(defmethod spread-callback-data (widget call-data (type (eql 'drawing-area)))
(declare (ignore widget))
(values (x-drawing-area-callback-window call-data)
(x-drawing-area-callback-event call-data)))
| null | https://raw.githubusercontent.com/craigl64/clim-ccl/301efbd770745b429f2b00b4e8ca6624de9d9ea9/tk/callbacks.lisp | lisp | -*- mode: common-lisp; package: tk -*-
See the file LICENSE for the full license governing this code.
malloc cache
Motif Callbacks
OpenLook Callbacks |
(in-package :tk)
(defun has-callbacks-p (w name)
(> (xt_has_callbacks w name) 1))
(defun-foreign-callable callback-handler ((widget :foreign-address)
(client-data :foreign-address)
(call-data :foreign-address))
(callback-handler-1 widget client-data call-data))
(defun callback-handler-1 (address client-data call-data)
(let* ((widget (find-object-from-address address))
(callback-info (or (assoc client-data (widget-callback-data widget))
(error "Cannot find callback info ~S,~S"
widget client-data))))
(destructuring-bind
(ignore (fn &rest args) &optional type)
callback-info
(declare (ignore ignore))
(apply fn
widget
(append (multiple-value-list (spread-callback-data widget call-data type))
args)))
0))
(defmethod spread-callback-data (widget call-data (type (eql nil)))
(declare (ignore widget call-data))
(values))
(defvar *callback-handler-address* nil)
(defun add-callback (widget callback-name function &rest args)
(multiple-value-bind
(name type)
(convert-callback-name callback-name)
(xt_add_callback
widget
name
(or *callback-handler-address*
(setq *callback-handler-address* (register-foreign-callable 'callback-handler)))
(caar (push
(list (new-callback-id) (cons function args) type)
(widget-callback-data widget))))))
(defun-foreign-callable create-popup-child-proc-function ((widget :foreign-address))
(create-popup-child-proc-function-1 widget))
(defun create-popup-child-proc-function-1 (widget)
(let* ((widget (find-object-from-address widget))
(function (or (cdr (assoc :create-popup-child-proc (widget-callback-data widget)))
(error "cannot find create-popup-childp-proc ~S" widget))))
(funcall function widget)))
(defvar *create-popup-child-proc-function-address* nil)
(defun (setf widget-create-popup-child-proc) (function widget)
(push
(cons :create-popup-child-proc function)
(widget-callback-data widget))
(set-values widget :create-popup-child-proc
(or *create-popup-child-proc-function-address*
(setq *create-popup-child-proc-function-address*
(ff:register-foreign-callable 'create-popup-child-proc-function))))
function)
(defun remove-all-callbacks (widget callback-name)
(xt_remove_all_callbacks widget (convert-callback-name callback-name)))
(defvar *callback-ids* 0)
(defun new-callback-id ()
(incf *callback-ids*))
(defun process-callback-alist-component (x)
(destructuring-bind (name &optional type) x
(list (lispify-tk-name name :package :keyword)
name
type)))
(defparameter *callback-name-alist*
(mapcar #'process-callback-alist-component
'(
("activateCallback" :activate)
("armCallback")
("disarmCallback")
("popupCallback")
("popdownCallback")
("helpCallback")
("decrementCallback")
("dragCallback")
("incrementCallback")
("pageDecrementCallback")
("pageIncrementCallback")
("toBottomCallback")
("toTopCallback")
("focusCallback")
("losingFocusCallback")
("modifyVerifyCallback" :modify-verify)
("valueChangedCallback")
("noMatchCallback")
("cancelCallback")
("applyCallback")
("okCallback")
("browseSelectionCallback" :single-selection)
("singleSelectionCallback" :single-selection)
("defaultActionCallback")
("extendedSelectionCallback")
("multipleSelectionCallback" :multiple-selection)
("entryCallback")
("mapCallback")
("unmapCallback")
("cascadingCallback")
("commandChangedCallback")
("commandEnteredCallback")
("exposeCallback" drawing-area)
("inputCallback" drawing-area)
("resizeCallback" drawing-area)
("destroyCallback")
("gainPrimaryCallback")
("losePrimaryCallback")
("sliderMoved" slider-moved)
("select")
("unselect")
("postModifyNotification")
("userMakeCurrent" ol-list-item-make-current)
)))
(defun convert-callback-name (x)
(let ((z (assoc x *callback-name-alist*)))
(unless z (error "No such Callback: ~S" x))
(values (or (third z)
(setf (third z)
(lisp-string-to-string8 (second z))))
(fourth z))))
(defmethod spread-callback-data (widget data (type (eql :activate)))
(declare (ignore widget))
(x-push-button-callback-struct-click-count data))
(defmethod spread-callback-data (widget data (type (eql :modify-verify)))
(declare (ignore widget))
data)
(defmethod spread-callback-data (widget call-data (type (eql 'drawing-area)))
(declare (ignore widget))
(values (x-drawing-area-callback-window call-data)
(x-drawing-area-callback-event call-data)))
|
2e19b3cf73ce467a2de21f8499a5506e3cdf9453071d88e0c5440d14c4500414 | psholtz/MIT-SICP | exercise2-28.scm | ;;
Exercise 2.28
;;
;; Write a procedure "fringe" that takes as arguments a tree (represented as a list) and
;; returns a list whose elements are all the leaves of the tree arranged in left-to-right
;; order. For example,
;;
( define x ( list ( list 1 2 ) ( list 3 4 ) ) )
;;
;; (fringe x)
;; (1 2 3 4)
;;
;; (fringe (list x x))
;; (1 2 3 4 1 2 3 4)
;;
;;
;; Define the "fringe" procedure:
;;
(define (fringe x)
(define (fringe-iter y)
(cond ((number? y) (list y))
((pair? y)
(append (fringe-iter (car y))
(fringe-iter (cdr y))))
(else '())))
(fringe-iter x))
;;
;; Run some unit tests:
;;
(define x (list (list 1 2) (list 3 4)))
(fringe x)
;; ==> (1 2 3 4)
(fringe (list x x))
;; ==> (1 2 3 4 1 2 3 4) | null | https://raw.githubusercontent.com/psholtz/MIT-SICP/01e9b722ac5008e26f386624849117ca8fa80906/Section-2.2/mit-scheme/exercise2-28.scm | scheme |
Write a procedure "fringe" that takes as arguments a tree (represented as a list) and
returns a list whose elements are all the leaves of the tree arranged in left-to-right
order. For example,
(fringe x)
(1 2 3 4)
(fringe (list x x))
(1 2 3 4 1 2 3 4)
Define the "fringe" procedure:
Run some unit tests:
==> (1 2 3 4)
==> (1 2 3 4 1 2 3 4) | Exercise 2.28
( define x ( list ( list 1 2 ) ( list 3 4 ) ) )
(define (fringe x)
(define (fringe-iter y)
(cond ((number? y) (list y))
((pair? y)
(append (fringe-iter (car y))
(fringe-iter (cdr y))))
(else '())))
(fringe-iter x))
(define x (list (list 1 2) (list 3 4)))
(fringe x)
(fringe (list x x)) |
a4c250bb2d6b0b5f0e1572fc243f7ee514f842757f280ca851431c4ed336ac07 | racket/racket7 | ffi-orig-place.rkt | #lang racket
(require ffi/unsafe)
(module+ test
(main))
;; Make sure that `#:in-original-place?' doesn't lead to deadlock:
(define scheme_malloc_atomic
(get-ffi-obj 'GC_malloc_atomic #f (_fun #:in-original-place? #t _long -> _pointer)))
(define (x-main)
(define ps
(for/list ([j (in-range 4)])
(define p
(place pch
(define j (place-channel-get pch))
;; Start a thread that keep having to wait on the original place:
(thread
(lambda ()
(let loop ()
(scheme_malloc_atomic 10)
(loop))))
Create a lot of message channels to trigger master GC :
(for ([i (in-range 10000)])
(place-channel))
(printf "done\n")
;; Helps check exit handling:
(when (even? j) (exit 1))))
(place-channel-put p j)
p))
(for-each place-wait ps))
(define (main)
(for ([i 5])
(printf "iter ~a\n" i)
(x-main)))
| null | https://raw.githubusercontent.com/racket/racket7/5dbb62c6bbec198b4a790f1dc08fef0c45c2e32b/pkgs/racket-test/tests/racket/ffi-orig-place.rkt | racket | Make sure that `#:in-original-place?' doesn't lead to deadlock:
Start a thread that keep having to wait on the original place:
Helps check exit handling: | #lang racket
(require ffi/unsafe)
(module+ test
(main))
(define scheme_malloc_atomic
(get-ffi-obj 'GC_malloc_atomic #f (_fun #:in-original-place? #t _long -> _pointer)))
(define (x-main)
(define ps
(for/list ([j (in-range 4)])
(define p
(place pch
(define j (place-channel-get pch))
(thread
(lambda ()
(let loop ()
(scheme_malloc_atomic 10)
(loop))))
Create a lot of message channels to trigger master GC :
(for ([i (in-range 10000)])
(place-channel))
(printf "done\n")
(when (even? j) (exit 1))))
(place-channel-put p j)
p))
(for-each place-wait ps))
(define (main)
(for ([i 5])
(printf "iter ~a\n" i)
(x-main)))
|
9ce0351d580d9d47a88032b01b47b853adb0a49b750a10680a06659d1b3361a8 | ajk/specialist-server | parser_test.clj | (ns specialist-server.parser-test
(:require [clojure.test :refer :all]
[clojure.java.io :as io]
[clojure.pprint :refer [pprint]]
[specialist-server.parser :as p]))
(def query (-> "test/__schema.txt" io/resource slurp))
(def result (-> "test/__schema.edn" io/resource slurp read-string))
(def bad-query
"query ($query: String=\"query string\") {
mutation
subscription
query(query:$query)
fragment {
query
}
}")
#_(pprint (p/parse "query Foo($a: String!=\"?\") {foo(a:1) {bar baz(b:2)} foo2 {bar2 baz2}}"))
#_(pprint (p/parse "query Foo { foo {bar {...Frag} baz} } query Bar {bar ...Frag } fragment Frag on bar {quux quux2}"))
#_(pprint (p/parse "query Foo { foo {bar {...Frag meh} baz} } fragment Frag on bar {quux quux2}"))
#_(pprint (p/parse query))
#_(time (dotimes [_ 1000]
(p/parse query)))
;;;
(deftest antlr-test
(testing "parse IntrospectionQuery"
(is (= (p/graphql query) result))
(is (= (p/graphql-two-step query) result))
)
(testing "parse query with reserved words"
(is (map? (p/parse bad-query))))
)
| null | https://raw.githubusercontent.com/ajk/specialist-server/1997ab3746cb730ca882e338d43836db6c997663/test/specialist_server/parser_test.clj | clojure | (ns specialist-server.parser-test
(:require [clojure.test :refer :all]
[clojure.java.io :as io]
[clojure.pprint :refer [pprint]]
[specialist-server.parser :as p]))
(def query (-> "test/__schema.txt" io/resource slurp))
(def result (-> "test/__schema.edn" io/resource slurp read-string))
(def bad-query
"query ($query: String=\"query string\") {
mutation
subscription
query(query:$query)
fragment {
query
}
}")
#_(pprint (p/parse "query Foo($a: String!=\"?\") {foo(a:1) {bar baz(b:2)} foo2 {bar2 baz2}}"))
#_(pprint (p/parse "query Foo { foo {bar {...Frag} baz} } query Bar {bar ...Frag } fragment Frag on bar {quux quux2}"))
#_(pprint (p/parse "query Foo { foo {bar {...Frag meh} baz} } fragment Frag on bar {quux quux2}"))
#_(pprint (p/parse query))
#_(time (dotimes [_ 1000]
(p/parse query)))
(deftest antlr-test
(testing "parse IntrospectionQuery"
(is (= (p/graphql query) result))
(is (= (p/graphql-two-step query) result))
)
(testing "parse query with reserved words"
(is (map? (p/parse bad-query))))
)
| |
a61ebfcd2a71ed4c399622fae95d060b6f3a888501cba9d1a91296b0b536191c | jellelicht/guix | download.scm | ;;; GNU Guix --- Functional package management for GNU
Copyright © 2012 , 2013 , 2014 , 2015 , 2016 < >
Copyright © 2013 , 2014 , 2015 < >
Copyright © 2015 < >
;;;
;;; This file is part of GNU Guix.
;;;
GNU is free software ; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation ; either version 3 of the License , or ( at
;;; your option) any later version.
;;;
;;; GNU Guix is distributed in the hope that it will be useful, but
;;; WITHOUT ANY WARRANTY; without even the implied warranty of
;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;;; GNU General Public License for more details.
;;;
You should have received a copy of the GNU General Public License
along with GNU . If not , see < / > .
(define-module (guix download)
#:use-module (ice-9 match)
#:use-module (guix derivations)
#:use-module (guix packages)
#:use-module (guix store)
#:use-module ((guix build download) #:prefix build:)
#:use-module (guix monads)
#:use-module (guix gexp)
#:use-module (guix utils)
#:use-module (web uri)
#:use-module (srfi srfi-1)
#:use-module (srfi srfi-26)
#:export (%mirrors
url-fetch
url-fetch/tarbomb
download-to-store))
;;; Commentary:
;;;
;;; Produce fixed-output derivations with data fetched over HTTP or FTP.
;;;
;;; Code:
(define %mirrors
Mirror lists used when ` mirror:// ' URLs are passed .
(let* ((gnu-mirrors
'(;; This one redirects to a (supposedly) nearby and (supposedly)
;; up-to-date mirror.
"/"
"ftp-berlin.de/pub/gnu/"
"ftp/"
;; This one is the master repository, and thus it's always
;; up-to-date.
"/")))
`((gnu ,@gnu-mirrors)
(gcc
"ftp/"
"ftp-berlin.de/unix/languages/gcc/"
"ftp/"
"ftp/"
,@(map (cut string-append <> "/gcc") gnu-mirrors))
(gnupg
"ftp/"
"ftp/"
"ftp/"
""
"ftp/"
"ftp/"
"/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/")
(gnome
"/"
"/"
"/"
"/")
(savannah
"/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"/"
"/"
"/"
"/"
"/"
"/"
"-noredirect/")
(sourceforge
"/"
"/"
"/"
"/"
"/"
"/"
"/")
(kernel.org
"/"
"-dresden.de/pub/mirrors/kernel.org/"
"-kernel.uio.no/pub/"
"/"
"ftp/"
"/"
"/")
(apache ; from
"/"
"/"
"ftp/"
"/"
"/"
"-mirror.rbc.ru/pub/apache/"
;; As a last resort, try the archive.
"/")
(xorg ; from
"/" ; main mirrors
North America
"ftp/"
"/"
"/"
"/"
Europe
"ftp/"
"ftp-berlin.de/unix/X11/FTP.X.ORG/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp-to-web.de/pub/mirrors/x.org/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
East Asia
"ftp-aizu.ac.jp/pub/x11/x.org/"
"ftp-u.ac.jp/pub/X11/x.org/"
"ftp/"
"ftp-part.com/xorg/"
"/"
South Africa
(cpan ; from
"/"
"ftp/"
"ftp/"
"/"
"ftp/"
"/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"/"
"ftp/")
(cran
;; Arbitrary mirrors from -project.org/mirrors.html
;; This one automatically redirects to servers worldwide
"-project.org/"
"/"
"-lyon1.fr/"
"/"
"/"
"/"
"/")
(imagemagick
;; from
;; (without mirrors that are unavailable or not up to date)
;; mirrors keeping old versions at the top level
"ftp/"
;; mirrors moving old versions to "legacy"
"-au.go-parts.com/mirrors/ImageMagick/"
"ftp/"
"/"
"ftp/"
"ftp-aizu.ac.jp/pub/graphics/image/ImageMagick/imagemagick.org/"
"ftp/"
"/"
""
"ftp/"
"-ru.go-parts.com/mirrors/ImageMagick/"
"/"
"-uk.go-parts.com/mirrors/ImageMagick/"
"-usa.go-parts.com/mirrors/ImageMagick/"
"ftp/"
"/"
;; one legacy location as a last resort
"/")
(debian
"/"
"/"
"/"))))
(define %mirror-file
;; Copy of the list of mirrors to a file. This allows us to keep a single
;; copy in the store, and computing it here avoids repeated calls to
;; 'object->string'.
(plain-file "mirrors" (object->string %mirrors)))
(define (gnutls-package)
"Return the default GnuTLS package."
(let ((module (resolve-interface '(gnu packages tls))))
(module-ref module 'gnutls)))
(define* (url-fetch url hash-algo hash
#:optional name
#:key (system (%current-system))
(guile (default-guile)))
"Return a fixed-output derivation that fetches URL (a string, or a list of
strings denoting alternate URLs), which is expected to have hash HASH of type
HASH-ALGO (a symbol). By default, the file name is the base name of URL;
optionally, NAME can specify a different file name.
When one of the URL starts with mirror://, then its host part is
interpreted as the name of a mirror scheme, taken from %MIRROR-FILE.
Alternately, when URL starts with file://, return the corresponding file name
in the store."
(define file-name
(match url
((head _ ...)
(basename head))
(_
(basename url))))
(define need-gnutls?
True if any of the URLs need TLS support .
(let ((https? (cut string-prefix? "https://" <>)))
(match url
((? string?)
(https? url))
((url ...)
(any https? url)))))
(define builder
#~(begin
#+(if need-gnutls?
;; Add GnuTLS to the inputs and to the load path.
#~(eval-when (load expand eval)
(set! %load-path
(cons (string-append #+(gnutls-package)
"/share/guile/site/"
(effective-version))
%load-path)))
#~#t)
(use-modules (guix build download))
(url-fetch (call-with-input-string (getenv "guix download url")
read)
#$output
#:mirrors (call-with-input-file #$%mirror-file read))))
(let ((uri (and (string? url) (string->uri url))))
(if (or (and (string? url) (not uri))
(and uri (memq (uri-scheme uri) '(#f file))))
(interned-file (if uri (uri-path uri) url)
(or name file-name))
(mlet %store-monad ((guile (package->derivation guile system)))
(gexp->derivation (or name file-name) builder
#:guile-for-build guile
#:system system
#:hash-algo hash-algo
#:hash hash
#:modules '((guix build download)
(guix build utils)
(guix ftp-client))
;; Use environment variables and a fixed script
name so there 's only one script in store for
;; all the downloads.
#:script-name "download"
#:env-vars
`(("guix download url" . ,(object->string url)))
;; Honor the user's proxy settings.
#:leaked-env-vars '("http_proxy" "https_proxy")
;; In general, offloading downloads is not a good
idea . before 0.8.3 would also
;; interpret this as "do not substitute" (see
;; <>.)
#:local-build? #t)))))
(define* (url-fetch/tarbomb url hash-algo hash
#:optional name
#:key (system (%current-system))
(guile (default-guile)))
"Similar to 'url-fetch' but unpack the file from URL in a directory of its
own. This helper makes it easier to deal with \"tar bombs\"."
(define gzip
(module-ref (resolve-interface '(gnu packages compression)) 'gzip))
(define tar
(module-ref (resolve-interface '(gnu packages base)) 'tar))
(mlet %store-monad ((drv (url-fetch url hash-algo hash
(string-append "tarbomb-" name)
#:system system
#:guile guile)))
;; Take the tar bomb, and simply unpack it as a directory.
(gexp->derivation name
#~(begin
(mkdir #$output)
(setenv "PATH" (string-append #$gzip "/bin"))
(chdir #$output)
(zero? (system* (string-append #$tar "/bin/tar")
"xf" #$drv)))
#:local-build? #t)))
(define* (download-to-store store url #:optional (name (basename url))
#:key (log (current-error-port)) recursive?)
"Download from URL to STORE, either under NAME or URL's basename if
omitted. Write progress reports to LOG. RECURSIVE? has the same effect as
the same-named parameter of 'add-to-store'."
(define uri
(string->uri url))
(if (or (not uri) (memq (uri-scheme uri) '(file #f)))
(add-to-store store name recursive? "sha256"
(if uri (uri-path uri) url))
(call-with-temporary-output-file
(lambda (temp port)
(let ((result
(parameterize ((current-output-port log))
(build:url-fetch url temp #:mirrors %mirrors))))
(close port)
(and result
(add-to-store store name recursive? "sha256" temp)))))))
;;; download.scm ends here
| null | https://raw.githubusercontent.com/jellelicht/guix/83cfc9414fca3ab57c949e18c1ceb375a179b59c/guix/download.scm | scheme | GNU Guix --- Functional package management for GNU
This file is part of GNU Guix.
you can redistribute it and/or modify it
either version 3 of the License , or ( at
your option) any later version.
GNU Guix is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Commentary:
Produce fixed-output derivations with data fetched over HTTP or FTP.
Code:
This one redirects to a (supposedly) nearby and (supposedly)
up-to-date mirror.
This one is the master repository, and thus it's always
up-to-date.
from
As a last resort, try the archive.
from
main mirrors
from
Arbitrary mirrors from -project.org/mirrors.html
This one automatically redirects to servers worldwide
from
(without mirrors that are unavailable or not up to date)
mirrors keeping old versions at the top level
mirrors moving old versions to "legacy"
one legacy location as a last resort
Copy of the list of mirrors to a file. This allows us to keep a single
copy in the store, and computing it here avoids repeated calls to
'object->string'.
Add GnuTLS to the inputs and to the load path.
Use environment variables and a fixed script
all the downloads.
Honor the user's proxy settings.
In general, offloading downloads is not a good
interpret this as "do not substitute" (see
<>.)
Take the tar bomb, and simply unpack it as a directory.
download.scm ends here | Copyright © 2012 , 2013 , 2014 , 2015 , 2016 < >
Copyright © 2013 , 2014 , 2015 < >
Copyright © 2015 < >
under the terms of the GNU General Public License as published by
You should have received a copy of the GNU General Public License
along with GNU . If not , see < / > .
(define-module (guix download)
#:use-module (ice-9 match)
#:use-module (guix derivations)
#:use-module (guix packages)
#:use-module (guix store)
#:use-module ((guix build download) #:prefix build:)
#:use-module (guix monads)
#:use-module (guix gexp)
#:use-module (guix utils)
#:use-module (web uri)
#:use-module (srfi srfi-1)
#:use-module (srfi srfi-26)
#:export (%mirrors
url-fetch
url-fetch/tarbomb
download-to-store))
(define %mirrors
Mirror lists used when ` mirror:// ' URLs are passed .
(let* ((gnu-mirrors
"/"
"ftp-berlin.de/pub/gnu/"
"ftp/"
"/")))
`((gnu ,@gnu-mirrors)
(gcc
"ftp/"
"ftp-berlin.de/unix/languages/gcc/"
"ftp/"
"ftp/"
,@(map (cut string-append <> "/gcc") gnu-mirrors))
(gnupg
"ftp/"
"ftp/"
"ftp/"
""
"ftp/"
"ftp/"
"/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/")
(gnome
"/"
"/"
"/"
"/")
(savannah
"/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"/"
"/"
"/"
"/"
"/"
"/"
"-noredirect/")
(sourceforge
"/"
"/"
"/"
"/"
"/"
"/"
"/")
(kernel.org
"/"
"-dresden.de/pub/mirrors/kernel.org/"
"-kernel.uio.no/pub/"
"/"
"ftp/"
"/"
"/")
"/"
"/"
"ftp/"
"/"
"/"
"-mirror.rbc.ru/pub/apache/"
"/")
North America
"ftp/"
"/"
"/"
"/"
Europe
"ftp/"
"ftp-berlin.de/unix/X11/FTP.X.ORG/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp-to-web.de/pub/mirrors/x.org/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
East Asia
"ftp-aizu.ac.jp/pub/x11/x.org/"
"ftp-u.ac.jp/pub/X11/x.org/"
"ftp/"
"ftp-part.com/xorg/"
"/"
South Africa
"/"
"ftp/"
"ftp/"
"/"
"ftp/"
"/"
"ftp/"
"ftp/"
"ftp/"
"ftp/"
"/"
"ftp/")
(cran
"-project.org/"
"/"
"-lyon1.fr/"
"/"
"/"
"/"
"/")
(imagemagick
"ftp/"
"-au.go-parts.com/mirrors/ImageMagick/"
"ftp/"
"/"
"ftp/"
"ftp-aizu.ac.jp/pub/graphics/image/ImageMagick/imagemagick.org/"
"ftp/"
"/"
""
"ftp/"
"-ru.go-parts.com/mirrors/ImageMagick/"
"/"
"-uk.go-parts.com/mirrors/ImageMagick/"
"-usa.go-parts.com/mirrors/ImageMagick/"
"ftp/"
"/"
"/")
(debian
"/"
"/"
"/"))))
(define %mirror-file
(plain-file "mirrors" (object->string %mirrors)))
(define (gnutls-package)
"Return the default GnuTLS package."
(let ((module (resolve-interface '(gnu packages tls))))
(module-ref module 'gnutls)))
(define* (url-fetch url hash-algo hash
#:optional name
#:key (system (%current-system))
(guile (default-guile)))
"Return a fixed-output derivation that fetches URL (a string, or a list of
strings denoting alternate URLs), which is expected to have hash HASH of type
optionally, NAME can specify a different file name.
When one of the URL starts with mirror://, then its host part is
interpreted as the name of a mirror scheme, taken from %MIRROR-FILE.
Alternately, when URL starts with file://, return the corresponding file name
in the store."
(define file-name
(match url
((head _ ...)
(basename head))
(_
(basename url))))
(define need-gnutls?
True if any of the URLs need TLS support .
(let ((https? (cut string-prefix? "https://" <>)))
(match url
((? string?)
(https? url))
((url ...)
(any https? url)))))
(define builder
#~(begin
#+(if need-gnutls?
#~(eval-when (load expand eval)
(set! %load-path
(cons (string-append #+(gnutls-package)
"/share/guile/site/"
(effective-version))
%load-path)))
#~#t)
(use-modules (guix build download))
(url-fetch (call-with-input-string (getenv "guix download url")
read)
#$output
#:mirrors (call-with-input-file #$%mirror-file read))))
(let ((uri (and (string? url) (string->uri url))))
(if (or (and (string? url) (not uri))
(and uri (memq (uri-scheme uri) '(#f file))))
(interned-file (if uri (uri-path uri) url)
(or name file-name))
(mlet %store-monad ((guile (package->derivation guile system)))
(gexp->derivation (or name file-name) builder
#:guile-for-build guile
#:system system
#:hash-algo hash-algo
#:hash hash
#:modules '((guix build download)
(guix build utils)
(guix ftp-client))
name so there 's only one script in store for
#:script-name "download"
#:env-vars
`(("guix download url" . ,(object->string url)))
#:leaked-env-vars '("http_proxy" "https_proxy")
idea . before 0.8.3 would also
#:local-build? #t)))))
(define* (url-fetch/tarbomb url hash-algo hash
#:optional name
#:key (system (%current-system))
(guile (default-guile)))
"Similar to 'url-fetch' but unpack the file from URL in a directory of its
own. This helper makes it easier to deal with \"tar bombs\"."
(define gzip
(module-ref (resolve-interface '(gnu packages compression)) 'gzip))
(define tar
(module-ref (resolve-interface '(gnu packages base)) 'tar))
(mlet %store-monad ((drv (url-fetch url hash-algo hash
(string-append "tarbomb-" name)
#:system system
#:guile guile)))
(gexp->derivation name
#~(begin
(mkdir #$output)
(setenv "PATH" (string-append #$gzip "/bin"))
(chdir #$output)
(zero? (system* (string-append #$tar "/bin/tar")
"xf" #$drv)))
#:local-build? #t)))
(define* (download-to-store store url #:optional (name (basename url))
#:key (log (current-error-port)) recursive?)
"Download from URL to STORE, either under NAME or URL's basename if
omitted. Write progress reports to LOG. RECURSIVE? has the same effect as
the same-named parameter of 'add-to-store'."
(define uri
(string->uri url))
(if (or (not uri) (memq (uri-scheme uri) '(file #f)))
(add-to-store store name recursive? "sha256"
(if uri (uri-path uri) url))
(call-with-temporary-output-file
(lambda (temp port)
(let ((result
(parameterize ((current-output-port log))
(build:url-fetch url temp #:mirrors %mirrors))))
(close port)
(and result
(add-to-store store name recursive? "sha256" temp)))))))
|
786530029d70271e79485e722064f187f6dd5e1b9c38df48663bccd39e0b3710 | s-expressionists/Concrete-Syntax-Tree | standard-grammars.lisp | (cl:in-package #:concrete-syntax-tree)
(defparameter *ordinary-required-parameter-group*
'((ordinary-required-parameter-group <-
(* simple-variable))))
(defparameter *ordinary-optional-parameter-group*
'((ordinary-optional-parameter-group <-
keyword-optional
(* ordinary-optional-parameter))))
(defparameter *ordinary-rest-parameter-group*
'((ordinary-rest-parameter-group <-
keyword-rest
simple-variable)))
(defparameter *ordinary-key-parameter-group*
'((ordinary-key-parameter-group <-
keyword-key
(* ordinary-key-parameter)
(? keyword-allow-other-keys))))
(defparameter *aux-parameter-group*
'((aux-parameter-group <-
keyword-aux
(* aux-parameter))))
(defparameter *ordinary-lambda-list*
'((ordinary-lambda-list <-
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *generic-function-optional-parameter-group*
'((generic-function-optional-parameter-group <-
keyword-optional
(* generic-function-optional-parameter))))
(defparameter *generic-function-key-parameter-group*
'((generic-function-key-parameter-group <-
keyword-key
(* generic-function-key-parameter)
(? keyword-allow-other-keys))))
(defparameter *generic-function-lambda-list*
'((generic-function-lambda-list <-
ordinary-required-parameter-group
(? generic-function-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? generic-function-key-parameter-group))))
(defparameter *specialized-required-parameter-group*
'((specialized-required-parameter-group <-
(* specialized-required-parameter))))
(defparameter *specialized-lambda-list*
'((specialized-lambda-list <-
specialized-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *environment-parameter-group*
'((environment-parameter-group <-
keyword-environment
simple-variable)))
(defparameter *defsetf-lambda-list*
'((defsetf-lambda-list <-
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? environment-parameter-group))))
(defparameter *define-modify-macro-lambda-list*
'((define-modify-macro-lambda-list <-
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group))))
(defparameter *ordinary-whole-parameter-group*
'((ordinary-whole-parameter-group <-
keyword-whole
simple-variable)))
(defparameter *define-method-combination-lambda-list*
'((define-method-combination-lambda-list <-
(? ordinary-whole-parameter-group)
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *destructuring-whole-parameter-group*
'((destructuring-whole-parameter-group <-
keyword-whole
destructuring-parameter)))
(defparameter *destructuring-required-parameter-group*
'((destructuring-required-parameter-group <-
(* destructuring-parameter))))
(defparameter *destructuring-optional-parameter-group*
'((destructuring-optional-parameter-group <-
keyword-optional
(* destructuring-optional-parameter))))
(defparameter *destructuring-key-parameter-group*
'((destructuring-key-parameter-group <-
keyword-key
(* destructuring-key-parameter)
(? keyword-allow-other-keys))))
(defparameter *destructuring-rest-parameter-group*
'((destructuring-rest-parameter-group <-
keyword-rest
destructuring-parameter)
(destructuring-rest-parameter-group <-
keyword-body
destructuring-parameter)))
(defparameter *destructuring-lambda-list*
`((destructuring-lambda-list <-
(? destructuring-whole-parameter-group)
destructuring-required-parameter-group
(? ordinary-optional-parameter-group)
(? destructuring-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *macro-lambda-list*
`((macro-lambda-list <-
(? destructuring-whole-parameter-group)
(? environment-parameter-group)
destructuring-required-parameter-group
(? environment-parameter-group)
(? destructuring-optional-parameter-group)
(? environment-parameter-group)
(? destructuring-rest-parameter-group)
(? environment-parameter-group)
(? destructuring-key-parameter-group)
(? environment-parameter-group)
(? aux-parameter-group)
(? environment-parameter-group))))
(defparameter *standard-grammar*
(append *ordinary-required-parameter-group*
*ordinary-optional-parameter-group*
*ordinary-rest-parameter-group*
*ordinary-key-parameter-group*
*aux-parameter-group*
*ordinary-lambda-list*
*generic-function-optional-parameter-group*
*generic-function-key-parameter-group*
*generic-function-lambda-list*
*specialized-required-parameter-group*
*specialized-lambda-list*
*environment-parameter-group*
*defsetf-lambda-list*
*define-modify-macro-lambda-list*
*ordinary-whole-parameter-group*
*define-method-combination-lambda-list*
*destructuring-whole-parameter-group*
*destructuring-required-parameter-group*
*destructuring-optional-parameter-group*
*destructuring-key-parameter-group*
*destructuring-rest-parameter-group*
*destructuring-lambda-list*
*macro-lambda-list*))
(defparameter *ordinary-lambda-list-grammar*
(generate-grammar 'ordinary-lambda-list *standard-grammar*))
(defparameter *generic-function-lambda-list-grammar*
(generate-grammar 'generic-function-lambda-list *standard-grammar*))
(defparameter *specialized-lambda-list-grammar*
(generate-grammar 'specialized-lambda-list *standard-grammar*))
(defparameter *defsetf-lambda-list-grammar*
(generate-grammar 'defsetf-lambda-list *standard-grammar*))
(defparameter *define-modify-macro-lambda-list-grammar*
(generate-grammar 'define-modify-macro-lambda-list *standard-grammar*))
(defparameter *define-method-combination-lambda-list-grammar*
(generate-grammar 'define-method-combination-lambda-list *standard-grammar*))
(defparameter *destructuring-lambda-list-grammar*
(generate-grammar 'destructuring-lambda-list *standard-grammar*))
(defparameter *macro-lambda-list-grammar*
(generate-grammar 'macro-lambda-list *standard-grammar*))
| null | https://raw.githubusercontent.com/s-expressionists/Concrete-Syntax-Tree/884a3ebdd288d7d9c24d55b4680086a88cb2dbc6/Lambda-list/standard-grammars.lisp | lisp | (cl:in-package #:concrete-syntax-tree)
(defparameter *ordinary-required-parameter-group*
'((ordinary-required-parameter-group <-
(* simple-variable))))
(defparameter *ordinary-optional-parameter-group*
'((ordinary-optional-parameter-group <-
keyword-optional
(* ordinary-optional-parameter))))
(defparameter *ordinary-rest-parameter-group*
'((ordinary-rest-parameter-group <-
keyword-rest
simple-variable)))
(defparameter *ordinary-key-parameter-group*
'((ordinary-key-parameter-group <-
keyword-key
(* ordinary-key-parameter)
(? keyword-allow-other-keys))))
(defparameter *aux-parameter-group*
'((aux-parameter-group <-
keyword-aux
(* aux-parameter))))
(defparameter *ordinary-lambda-list*
'((ordinary-lambda-list <-
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *generic-function-optional-parameter-group*
'((generic-function-optional-parameter-group <-
keyword-optional
(* generic-function-optional-parameter))))
(defparameter *generic-function-key-parameter-group*
'((generic-function-key-parameter-group <-
keyword-key
(* generic-function-key-parameter)
(? keyword-allow-other-keys))))
(defparameter *generic-function-lambda-list*
'((generic-function-lambda-list <-
ordinary-required-parameter-group
(? generic-function-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? generic-function-key-parameter-group))))
(defparameter *specialized-required-parameter-group*
'((specialized-required-parameter-group <-
(* specialized-required-parameter))))
(defparameter *specialized-lambda-list*
'((specialized-lambda-list <-
specialized-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *environment-parameter-group*
'((environment-parameter-group <-
keyword-environment
simple-variable)))
(defparameter *defsetf-lambda-list*
'((defsetf-lambda-list <-
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? environment-parameter-group))))
(defparameter *define-modify-macro-lambda-list*
'((define-modify-macro-lambda-list <-
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group))))
(defparameter *ordinary-whole-parameter-group*
'((ordinary-whole-parameter-group <-
keyword-whole
simple-variable)))
(defparameter *define-method-combination-lambda-list*
'((define-method-combination-lambda-list <-
(? ordinary-whole-parameter-group)
ordinary-required-parameter-group
(? ordinary-optional-parameter-group)
(? ordinary-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *destructuring-whole-parameter-group*
'((destructuring-whole-parameter-group <-
keyword-whole
destructuring-parameter)))
(defparameter *destructuring-required-parameter-group*
'((destructuring-required-parameter-group <-
(* destructuring-parameter))))
(defparameter *destructuring-optional-parameter-group*
'((destructuring-optional-parameter-group <-
keyword-optional
(* destructuring-optional-parameter))))
(defparameter *destructuring-key-parameter-group*
'((destructuring-key-parameter-group <-
keyword-key
(* destructuring-key-parameter)
(? keyword-allow-other-keys))))
(defparameter *destructuring-rest-parameter-group*
'((destructuring-rest-parameter-group <-
keyword-rest
destructuring-parameter)
(destructuring-rest-parameter-group <-
keyword-body
destructuring-parameter)))
(defparameter *destructuring-lambda-list*
`((destructuring-lambda-list <-
(? destructuring-whole-parameter-group)
destructuring-required-parameter-group
(? ordinary-optional-parameter-group)
(? destructuring-rest-parameter-group)
(? ordinary-key-parameter-group)
(? aux-parameter-group))))
(defparameter *macro-lambda-list*
`((macro-lambda-list <-
(? destructuring-whole-parameter-group)
(? environment-parameter-group)
destructuring-required-parameter-group
(? environment-parameter-group)
(? destructuring-optional-parameter-group)
(? environment-parameter-group)
(? destructuring-rest-parameter-group)
(? environment-parameter-group)
(? destructuring-key-parameter-group)
(? environment-parameter-group)
(? aux-parameter-group)
(? environment-parameter-group))))
(defparameter *standard-grammar*
(append *ordinary-required-parameter-group*
*ordinary-optional-parameter-group*
*ordinary-rest-parameter-group*
*ordinary-key-parameter-group*
*aux-parameter-group*
*ordinary-lambda-list*
*generic-function-optional-parameter-group*
*generic-function-key-parameter-group*
*generic-function-lambda-list*
*specialized-required-parameter-group*
*specialized-lambda-list*
*environment-parameter-group*
*defsetf-lambda-list*
*define-modify-macro-lambda-list*
*ordinary-whole-parameter-group*
*define-method-combination-lambda-list*
*destructuring-whole-parameter-group*
*destructuring-required-parameter-group*
*destructuring-optional-parameter-group*
*destructuring-key-parameter-group*
*destructuring-rest-parameter-group*
*destructuring-lambda-list*
*macro-lambda-list*))
(defparameter *ordinary-lambda-list-grammar*
(generate-grammar 'ordinary-lambda-list *standard-grammar*))
(defparameter *generic-function-lambda-list-grammar*
(generate-grammar 'generic-function-lambda-list *standard-grammar*))
(defparameter *specialized-lambda-list-grammar*
(generate-grammar 'specialized-lambda-list *standard-grammar*))
(defparameter *defsetf-lambda-list-grammar*
(generate-grammar 'defsetf-lambda-list *standard-grammar*))
(defparameter *define-modify-macro-lambda-list-grammar*
(generate-grammar 'define-modify-macro-lambda-list *standard-grammar*))
(defparameter *define-method-combination-lambda-list-grammar*
(generate-grammar 'define-method-combination-lambda-list *standard-grammar*))
(defparameter *destructuring-lambda-list-grammar*
(generate-grammar 'destructuring-lambda-list *standard-grammar*))
(defparameter *macro-lambda-list-grammar*
(generate-grammar 'macro-lambda-list *standard-grammar*))
| |
5e611e78cb1bc8d7a22586694b8d5c89cb42e95494f7a53ffab6278b608343f3 | byorgey/thesis | Bijections.hs | # LANGUAGE FlexibleInstances #
# LANGUAGE NoMonomorphismRestriction #
# LANGUAGE TemplateHaskell #
# LANGUAGE TupleSections #
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE TypeSynonymInstances #-}
module Bijections where
import Control.Arrow ((&&&))
import Control.Lens (makeLenses, mapped, (^.), _2)
import Control.Monad (msum)
import Data.Default.Class
import Data.List (findIndex, isSuffixOf, partition)
import qualified Data.Map as M
import Data.Maybe (catMaybes, fromMaybe)
import Data.Typeable
import Diagrams.Backend.Cairo
import Diagrams.Core.Names
import Diagrams.Prelude hiding (end, r2, start)
import Graphics.SVGFonts
------------------------------------------------------------
Diagram utilities
type Dia = Diagram Cairo R2
dot :: Dia
dot = circle 0.3 # fc black # lw none
text' :: Double -> String -> Dia
text' d t = stroke (textSVG' $ TextOpts t lin INSIDE_H KERN False d d) # fc black
------------------------------------------------------------
-- Name utilities
disjointly :: Qualifiable q => ([q] -> q) -> [q] -> q
disjointly f = f . zipWith (|>) ['a'..]
(|@) :: Char -> Int -> Name
c |@ i = c |> toName i
(|@@) :: Char -> [Int] -> [Name]
c |@@ is = map (c |@) is
------------------------------------------------------------
Parallel composition
Parallel composition is not necessarily associative , nor is empty
-- an identity.
class Par p where
empty :: p
par :: p -> p -> p
par x y = pars [x,y]
pars :: [p] -> p
pars = foldr par empty
------------------------------------------------------------
-- Sets
data ASet =
ASet
{ _eltNames :: [Name]
, _setColor :: Colour Double
}
$(makeLenses ''ASet)
instance Qualifiable ASet where
n |> s = s & eltNames %~ (n|>)
type Set = [ASet]
instance Par Set where
empty = []
pars = disjointly concat
nset :: Int -> Colour Double -> ASet
nset n c = ASet (map toName [0::Int .. (n-1)]) c
set :: IsName n => [n] -> Colour Double -> ASet
set ns c = ASet (map toName ns) c
drawSet :: Set -> Dia
drawSet = centerY . vcat . zipWithMult (|>) ['a'..] . map drawAtomic . annot . annot
where
zipWithMult _ _ [x] = [x]
zipWithMult f xs ys = zipWith f xs ys
annot = reverse . zip (False : repeat True)
drawAtomic (bot, (top, ASet nms c))
= mconcat
[ vcat' (with & sep .~ 1 & catMethod .~ Distrib)
(zipWith named nms (replicate (length nms) dot))
# centerY
, roundedRect' 1 (fromIntegral (length nms))
(with & radiusTL .~ (if top then 0 else (1/2))
& radiusTR .~ (if top then 0 else (1/2))
& radiusBL .~ (if bot then 0 else (1/2))
& radiusBR .~ (if bot then 0 else (1/2))
)
# fcA (c `withOpacity` 0.5)
]
------------------------------------------------------------
data ABij
= ABij
{ _bijDomain :: [Name]
, _bijData :: Name -> Maybe Name
, _bijStyle :: Name -> Style R2
, _bijSep :: Double
, _bijLabel :: Maybe Dia
}
$(makeLenses ''ABij)
instance Qualifiable ABij where
n |> bij = bij & bijData %~ prefixF n & bijDomain %~ (n |>)
where
prefixF :: IsName a => a -> (Name -> Maybe Name) -> (Name -> Maybe Name)
prefixF _ _ (Name []) = Just $ Name []
prefixF i f (Name (AName a : as)) =
case cast a of
Nothing -> Nothing
Just a' -> if a' == i then (i |>) <$> f (Name as) else Nothing
bijFun :: [Int] -> (Int -> Maybe Int) -> ABij
bijFun is f = def & bijDomain .~ toNamesI is & bijData .~ fmap toName . f . extractInt 0
where
extractInt :: Int -> Name -> Int
extractInt i (Name []) = i
extractInt i (Name ns) = case last ns of
AName a -> case cast a of
Nothing -> i
Just i' -> i'
bijTable :: [(Name, Name)] -> ABij
bijTable tab = def & bijDomain .~ map fst tab & bijData .~ tableToFun tab
mkABij :: ASet -> ASet -> (Int -> Int) -> ABij
mkABij s1 s2 f = def & bijDomain .~ (s1 ^. eltNames)
& bijData .~ \n -> findIndex (==n) (s1 ^. eltNames) >>= ((s2^.eltNames) !!!) . f
-- mkBij :: Set -> Set -> (Int -> Int) -> Bij
-- mkBij ss1 ss2 f = undefined
(!!!) :: [a] -> Int -> Maybe a
[] !!! _ = Nothing
(x:_) !!! 0 = Just x
(_:xs) !!! n = xs !!! (n-1)
tableToFun :: Eq a => [(a, b)] -> a -> Maybe b
tableToFun = flip lookup
instance Default ABij where
def = ABij
{ _bijDomain = []
, _bijData = const Nothing
, _bijStyle = const $ mempty # dashingG [0.03,0.02] 0 # lineCap LineCapButt
, _bijSep = 3
, _bijLabel = Nothing
}
type Bij = [ABij]
emptyBij :: Bij
emptyBij = [with & bijData .~ const Nothing]
parBij :: Bij -> Bij -> Bij
parBij x y = parBijs [x,y]
parBijs :: [Bij] -> Bij
parBijs = disjointly concat
labelBij :: String -> Bij -> Bij
labelBij s = (mapped . bijLabel) .~ Just (text' 2 s)
------------------------------------------------------------
-- Alternating lists
data AltList a b
= Single a
| Cons a b (AltList a b)
infixr 5 .-, -., -..
(.-) :: a -> (b, AltList a b) -> AltList a b
a .- (b,l) = Cons a b l
(-.) :: b -> AltList a b -> (b, AltList a b)
(-.) = (,)
(-..) :: b -> a -> (b,AltList a b)
b -.. a = (b, Single a)
zipWithA :: (a1 -> a2 -> a3) -> (b1 -> b2 -> b3) -> AltList a1 b1 -> AltList a2 b2 -> AltList a3 b3
zipWithA f _ (Single x1) (Single x2) = Single (f x1 x2)
zipWithA f _ (Single x1) (Cons x2 _ _) = Single (f x1 x2)
zipWithA f _ (Cons x1 _ _) (Single x2) = Single (f x1 x2)
zipWithA f g (Cons x1 y1 l1) (Cons x2 y2 l2) = Cons (f x1 x2) (g y1 y2) (zipWithA f g l1 l2)
concatA :: AltList a b -> b -> AltList a b -> AltList a b
concatA (Single a) b l = Cons a b l
concatA (Cons a b l) b' l' = Cons a b (concatA l b' l')
flattenA :: AltList (AltList a b) b -> AltList a b
flattenA (Single l) = l
flattenA (Cons l b l') = concatA l b (flattenA l')
map1 :: (a -> b) -> AltList a c -> AltList b c
map1 f (Single a) = Single (f a)
map1 f (Cons a b l) = Cons (f a) b (map1 f l)
map2 :: (b -> c) -> AltList a b -> AltList a c
map2 _ (Single a) = Single a
map2 g (Cons a b l) = Cons a (g b) (map2 g l)
iterateA :: (a -> b) -> (b -> a) -> a -> AltList a b
iterateA f g a = Cons a b (iterateA f g (g b))
where b = f a
takeWhileA :: (b -> Bool) -> AltList a b -> AltList a b
takeWhileA _ (Single a) = Single a
takeWhileA p (Cons a b l)
| p b = Cons a b (takeWhileA p l)
| otherwise = Single a
foldA :: (a -> r) -> (a -> b -> r -> r) -> AltList a b -> r
foldA f _ (Single a) = f a
foldA f g (Cons a b l) = g a b (foldA f g l)
------------------------------------------------------------
Bijection complexes
type BComplex = AltList Set Bij
labelBC :: String -> BComplex -> BComplex
labelBC = map2 . labelBij
seqC :: BComplex -> Bij -> BComplex -> BComplex
seqC = concatA
parC :: BComplex -> BComplex -> BComplex
parC = zipWithA (++) parBij
drawBComplex :: BComplex -> Dia
drawBComplex = centerX . drawBComplexR 0
where
drawBComplexR :: Int -> BComplex -> Dia
drawBComplexR i (Single s) = i |> drawSet s
drawBComplexR i (Cons ss bs c) =
hcat
[ i |> s1
, strutX thisSep <> label
, drawBComplexR (succ i) c
]
# applyAll (map (drawABij i (map fst $ names s1)) bs)
where
s1 = drawSet ss
thisSep = case bs of
[] -> 0
_ -> maximum . map (^. bijSep) $ bs
label = (fromMaybe mempty . msum . reverse . map (^. bijLabel) $ bs)
# (\d -> d # withEnvelope (strutY (height d) :: D R2))
# (\d -> translateY (-(height s1 + thisSep - height d)/2) d)
drawABij :: Int -> [Name] -> ABij -> Dia -> Dia
drawABij i ns b = applyAll (map conn . catMaybes . map (_2 id . (id &&& (b ^. bijData))) $ ns)
where
conn :: (Name,Name) -> Dia -> Dia
conn (n1,n2) = withNames [i |> n1, (i+1) |> n2] $ \[s1,s2] -> atop (drawLine s1 s2 # applyStyle (sty n1))
sty = b ^. bijStyle
drawLine sub1 sub2 = boundaryFrom sub1 v ~~ boundaryFrom sub2 (negateV v)
where
v = location sub2 .-. location sub1
toNameI :: Int -> Name
toNameI = toName
toNamesI :: [Int] -> [Name]
toNamesI = map toName
plus, minus, equals :: Dia
plus = hrule 1 <> vrule 1
minus = hrule 1
equals = hrule 1 === strutY 0.5 === hrule 1
mapAName :: (Typeable a, Typeable b, Ord b, Show b) => (a -> b) -> AName -> AName
mapAName f an@(AName x) = case cast x of
Nothing -> an
Just a -> AName (f a)
mapName :: (Typeable a, Typeable b, Ord b, Show b) => (a -> b) -> Name -> Name
mapName f (Name ns) = Name (map (mapAName f) ns)
------------------------------------------------------------
-- Computing orbits/coloration
type Edge a = (a,a)
type Relator a = (a,[a],a)
mkRelator :: Edge a -> Relator a
mkRelator (n1,n2) = (n1,[],n2)
start :: Relator a -> a
start (n,_,_) = n
end :: Relator a -> a
end (_,_,n) = n
relatorToList :: Relator a -> [a]
relatorToList (a,bs,c) = a : bs ++ [c]
isTailOf :: Eq a => Relator a -> Relator a -> Bool
isTailOf r1 r2 = relatorToList r1 `isSuffixOf` relatorToList r2 && r1 /= r2
composeRelators :: Eq a => Relator a -> Relator a -> Maybe (Relator a)
composeRelators (s1,ns1,e1) (s2,ns2,e2)
| e1 == s2 = Just (s1,ns1++[e1]++ns2,e2)
| otherwise = Nothing
type Relation a = [Relator a]
mkRelation :: [Edge a] -> Relation a
mkRelation = map mkRelator
emptyR :: Relation a
emptyR = []
unionR :: Relation a -> Relation a -> Relation a
unionR = (++)
composeR :: Eq a => Relation a -> Relation a -> Relation a
composeR rs1 rs2 = [ rel | rel1 <- rs1, rel2 <- rs2, Just rel <- [composeRelators rel1 rel2] ]
orbits :: Eq a => Relation a -> Relation a -> Relation a
orbits r1 r2 = removeTails $ orbits' r2 r1 r1
where
orbits' _ _ [] = []
orbits' r1 r2 r = done `unionR` orbits' r2 r1 (r' `composeR` r1)
where
(done, r') = partition finished r
finished rel = (start rel == end rel) || all ((/= end rel) . start) r1
removeTails rs = filter (\r -> not (any (r `isTailOf`) rs)) rs
bijToRel :: Bij -> Relation Name
bijToRel = foldr unionR emptyR . map bijToRel1
where
bijToRel1 bij = mkRelation . catMaybes . map (_2 id . (id &&& (bij^.bijData))) $ bij^.bijDomain
orbitsToColorMap :: Ord a => [Colour Double] -> Relation a -> M.Map a (Colour Double)
orbitsToColorMap colors orbs = M.fromList (concat $ zipWith (\rel c -> map (,c) rel) (map relatorToList orbs) (cycle colors))
colorBij :: M.Map Name (Colour Double) -> Bij -> Bij
colorBij colors = map colorBij'
where
colorBij' bij = bij & bijStyle .~ \n -> maybe id lc (M.lookup n colors) ((bij ^. bijStyle) n)
------------------------------------------------------------
-- Example sets and bijections
a0, b0, a1, b1 :: ASet
a0 = nset 3 yellow
b0 = nset 3 blue
a1 = nset 2 green
b1 = nset 2 red
bc0, bc1, bc01 :: BComplex
bc0 = [a0] .- bij0 -.. [b0]
bc1 = [a1] .- bij1 -.. [b1]
bc01 = [a0,a1] .- bij01 -.. [b0,b1]
bij0, bij1 :: Bij
bij0 = [mkABij a0 b0 ((`mod` 3) . succ . succ)]
bij1 = [mkABij a1 b1 id]
names01, names02 :: [Name]
names01 = 'X' |> disjointly concat [head bij0^.bijDomain, head bij1^.bijDomain]
names02 = 'Y' |> (('a' |@@ [1,2]) ++ ('b' |@@ [0,1]) ++ ('a' |@@ [0]))
bij01 :: Bij
bij01 = []
| null | https://raw.githubusercontent.com/byorgey/thesis/b60af0501cdb2d1154d9303694884f461c0e499a/Bijections.hs | haskell | # LANGUAGE TypeOperators #
# LANGUAGE TypeSynonymInstances #
----------------------------------------------------------
----------------------------------------------------------
Name utilities
----------------------------------------------------------
an identity.
----------------------------------------------------------
Sets
----------------------------------------------------------
mkBij :: Set -> Set -> (Int -> Int) -> Bij
mkBij ss1 ss2 f = undefined
----------------------------------------------------------
Alternating lists
----------------------------------------------------------
----------------------------------------------------------
Computing orbits/coloration
----------------------------------------------------------
Example sets and bijections | # LANGUAGE FlexibleInstances #
# LANGUAGE NoMonomorphismRestriction #
# LANGUAGE TemplateHaskell #
# LANGUAGE TupleSections #
module Bijections where
import Control.Arrow ((&&&))
import Control.Lens (makeLenses, mapped, (^.), _2)
import Control.Monad (msum)
import Data.Default.Class
import Data.List (findIndex, isSuffixOf, partition)
import qualified Data.Map as M
import Data.Maybe (catMaybes, fromMaybe)
import Data.Typeable
import Diagrams.Backend.Cairo
import Diagrams.Core.Names
import Diagrams.Prelude hiding (end, r2, start)
import Graphics.SVGFonts
Diagram utilities
type Dia = Diagram Cairo R2
dot :: Dia
dot = circle 0.3 # fc black # lw none
text' :: Double -> String -> Dia
text' d t = stroke (textSVG' $ TextOpts t lin INSIDE_H KERN False d d) # fc black
disjointly :: Qualifiable q => ([q] -> q) -> [q] -> q
disjointly f = f . zipWith (|>) ['a'..]
(|@) :: Char -> Int -> Name
c |@ i = c |> toName i
(|@@) :: Char -> [Int] -> [Name]
c |@@ is = map (c |@) is
Parallel composition
Parallel composition is not necessarily associative , nor is empty
class Par p where
empty :: p
par :: p -> p -> p
par x y = pars [x,y]
pars :: [p] -> p
pars = foldr par empty
data ASet =
ASet
{ _eltNames :: [Name]
, _setColor :: Colour Double
}
$(makeLenses ''ASet)
instance Qualifiable ASet where
n |> s = s & eltNames %~ (n|>)
type Set = [ASet]
instance Par Set where
empty = []
pars = disjointly concat
nset :: Int -> Colour Double -> ASet
nset n c = ASet (map toName [0::Int .. (n-1)]) c
set :: IsName n => [n] -> Colour Double -> ASet
set ns c = ASet (map toName ns) c
drawSet :: Set -> Dia
drawSet = centerY . vcat . zipWithMult (|>) ['a'..] . map drawAtomic . annot . annot
where
zipWithMult _ _ [x] = [x]
zipWithMult f xs ys = zipWith f xs ys
annot = reverse . zip (False : repeat True)
drawAtomic (bot, (top, ASet nms c))
= mconcat
[ vcat' (with & sep .~ 1 & catMethod .~ Distrib)
(zipWith named nms (replicate (length nms) dot))
# centerY
, roundedRect' 1 (fromIntegral (length nms))
(with & radiusTL .~ (if top then 0 else (1/2))
& radiusTR .~ (if top then 0 else (1/2))
& radiusBL .~ (if bot then 0 else (1/2))
& radiusBR .~ (if bot then 0 else (1/2))
)
# fcA (c `withOpacity` 0.5)
]
data ABij
= ABij
{ _bijDomain :: [Name]
, _bijData :: Name -> Maybe Name
, _bijStyle :: Name -> Style R2
, _bijSep :: Double
, _bijLabel :: Maybe Dia
}
$(makeLenses ''ABij)
instance Qualifiable ABij where
n |> bij = bij & bijData %~ prefixF n & bijDomain %~ (n |>)
where
prefixF :: IsName a => a -> (Name -> Maybe Name) -> (Name -> Maybe Name)
prefixF _ _ (Name []) = Just $ Name []
prefixF i f (Name (AName a : as)) =
case cast a of
Nothing -> Nothing
Just a' -> if a' == i then (i |>) <$> f (Name as) else Nothing
bijFun :: [Int] -> (Int -> Maybe Int) -> ABij
bijFun is f = def & bijDomain .~ toNamesI is & bijData .~ fmap toName . f . extractInt 0
where
extractInt :: Int -> Name -> Int
extractInt i (Name []) = i
extractInt i (Name ns) = case last ns of
AName a -> case cast a of
Nothing -> i
Just i' -> i'
bijTable :: [(Name, Name)] -> ABij
bijTable tab = def & bijDomain .~ map fst tab & bijData .~ tableToFun tab
mkABij :: ASet -> ASet -> (Int -> Int) -> ABij
mkABij s1 s2 f = def & bijDomain .~ (s1 ^. eltNames)
& bijData .~ \n -> findIndex (==n) (s1 ^. eltNames) >>= ((s2^.eltNames) !!!) . f
(!!!) :: [a] -> Int -> Maybe a
[] !!! _ = Nothing
(x:_) !!! 0 = Just x
(_:xs) !!! n = xs !!! (n-1)
tableToFun :: Eq a => [(a, b)] -> a -> Maybe b
tableToFun = flip lookup
instance Default ABij where
def = ABij
{ _bijDomain = []
, _bijData = const Nothing
, _bijStyle = const $ mempty # dashingG [0.03,0.02] 0 # lineCap LineCapButt
, _bijSep = 3
, _bijLabel = Nothing
}
type Bij = [ABij]
emptyBij :: Bij
emptyBij = [with & bijData .~ const Nothing]
parBij :: Bij -> Bij -> Bij
parBij x y = parBijs [x,y]
parBijs :: [Bij] -> Bij
parBijs = disjointly concat
labelBij :: String -> Bij -> Bij
labelBij s = (mapped . bijLabel) .~ Just (text' 2 s)
data AltList a b
= Single a
| Cons a b (AltList a b)
infixr 5 .-, -., -..
(.-) :: a -> (b, AltList a b) -> AltList a b
a .- (b,l) = Cons a b l
(-.) :: b -> AltList a b -> (b, AltList a b)
(-.) = (,)
(-..) :: b -> a -> (b,AltList a b)
b -.. a = (b, Single a)
zipWithA :: (a1 -> a2 -> a3) -> (b1 -> b2 -> b3) -> AltList a1 b1 -> AltList a2 b2 -> AltList a3 b3
zipWithA f _ (Single x1) (Single x2) = Single (f x1 x2)
zipWithA f _ (Single x1) (Cons x2 _ _) = Single (f x1 x2)
zipWithA f _ (Cons x1 _ _) (Single x2) = Single (f x1 x2)
zipWithA f g (Cons x1 y1 l1) (Cons x2 y2 l2) = Cons (f x1 x2) (g y1 y2) (zipWithA f g l1 l2)
concatA :: AltList a b -> b -> AltList a b -> AltList a b
concatA (Single a) b l = Cons a b l
concatA (Cons a b l) b' l' = Cons a b (concatA l b' l')
flattenA :: AltList (AltList a b) b -> AltList a b
flattenA (Single l) = l
flattenA (Cons l b l') = concatA l b (flattenA l')
map1 :: (a -> b) -> AltList a c -> AltList b c
map1 f (Single a) = Single (f a)
map1 f (Cons a b l) = Cons (f a) b (map1 f l)
map2 :: (b -> c) -> AltList a b -> AltList a c
map2 _ (Single a) = Single a
map2 g (Cons a b l) = Cons a (g b) (map2 g l)
iterateA :: (a -> b) -> (b -> a) -> a -> AltList a b
iterateA f g a = Cons a b (iterateA f g (g b))
where b = f a
takeWhileA :: (b -> Bool) -> AltList a b -> AltList a b
takeWhileA _ (Single a) = Single a
takeWhileA p (Cons a b l)
| p b = Cons a b (takeWhileA p l)
| otherwise = Single a
foldA :: (a -> r) -> (a -> b -> r -> r) -> AltList a b -> r
foldA f _ (Single a) = f a
foldA f g (Cons a b l) = g a b (foldA f g l)
Bijection complexes
type BComplex = AltList Set Bij
labelBC :: String -> BComplex -> BComplex
labelBC = map2 . labelBij
seqC :: BComplex -> Bij -> BComplex -> BComplex
seqC = concatA
parC :: BComplex -> BComplex -> BComplex
parC = zipWithA (++) parBij
drawBComplex :: BComplex -> Dia
drawBComplex = centerX . drawBComplexR 0
where
drawBComplexR :: Int -> BComplex -> Dia
drawBComplexR i (Single s) = i |> drawSet s
drawBComplexR i (Cons ss bs c) =
hcat
[ i |> s1
, strutX thisSep <> label
, drawBComplexR (succ i) c
]
# applyAll (map (drawABij i (map fst $ names s1)) bs)
where
s1 = drawSet ss
thisSep = case bs of
[] -> 0
_ -> maximum . map (^. bijSep) $ bs
label = (fromMaybe mempty . msum . reverse . map (^. bijLabel) $ bs)
# (\d -> d # withEnvelope (strutY (height d) :: D R2))
# (\d -> translateY (-(height s1 + thisSep - height d)/2) d)
drawABij :: Int -> [Name] -> ABij -> Dia -> Dia
drawABij i ns b = applyAll (map conn . catMaybes . map (_2 id . (id &&& (b ^. bijData))) $ ns)
where
conn :: (Name,Name) -> Dia -> Dia
conn (n1,n2) = withNames [i |> n1, (i+1) |> n2] $ \[s1,s2] -> atop (drawLine s1 s2 # applyStyle (sty n1))
sty = b ^. bijStyle
drawLine sub1 sub2 = boundaryFrom sub1 v ~~ boundaryFrom sub2 (negateV v)
where
v = location sub2 .-. location sub1
toNameI :: Int -> Name
toNameI = toName
toNamesI :: [Int] -> [Name]
toNamesI = map toName
plus, minus, equals :: Dia
plus = hrule 1 <> vrule 1
minus = hrule 1
equals = hrule 1 === strutY 0.5 === hrule 1
mapAName :: (Typeable a, Typeable b, Ord b, Show b) => (a -> b) -> AName -> AName
mapAName f an@(AName x) = case cast x of
Nothing -> an
Just a -> AName (f a)
mapName :: (Typeable a, Typeable b, Ord b, Show b) => (a -> b) -> Name -> Name
mapName f (Name ns) = Name (map (mapAName f) ns)
type Edge a = (a,a)
type Relator a = (a,[a],a)
mkRelator :: Edge a -> Relator a
mkRelator (n1,n2) = (n1,[],n2)
start :: Relator a -> a
start (n,_,_) = n
end :: Relator a -> a
end (_,_,n) = n
relatorToList :: Relator a -> [a]
relatorToList (a,bs,c) = a : bs ++ [c]
isTailOf :: Eq a => Relator a -> Relator a -> Bool
isTailOf r1 r2 = relatorToList r1 `isSuffixOf` relatorToList r2 && r1 /= r2
composeRelators :: Eq a => Relator a -> Relator a -> Maybe (Relator a)
composeRelators (s1,ns1,e1) (s2,ns2,e2)
| e1 == s2 = Just (s1,ns1++[e1]++ns2,e2)
| otherwise = Nothing
type Relation a = [Relator a]
mkRelation :: [Edge a] -> Relation a
mkRelation = map mkRelator
emptyR :: Relation a
emptyR = []
unionR :: Relation a -> Relation a -> Relation a
unionR = (++)
composeR :: Eq a => Relation a -> Relation a -> Relation a
composeR rs1 rs2 = [ rel | rel1 <- rs1, rel2 <- rs2, Just rel <- [composeRelators rel1 rel2] ]
orbits :: Eq a => Relation a -> Relation a -> Relation a
orbits r1 r2 = removeTails $ orbits' r2 r1 r1
where
orbits' _ _ [] = []
orbits' r1 r2 r = done `unionR` orbits' r2 r1 (r' `composeR` r1)
where
(done, r') = partition finished r
finished rel = (start rel == end rel) || all ((/= end rel) . start) r1
removeTails rs = filter (\r -> not (any (r `isTailOf`) rs)) rs
bijToRel :: Bij -> Relation Name
bijToRel = foldr unionR emptyR . map bijToRel1
where
bijToRel1 bij = mkRelation . catMaybes . map (_2 id . (id &&& (bij^.bijData))) $ bij^.bijDomain
orbitsToColorMap :: Ord a => [Colour Double] -> Relation a -> M.Map a (Colour Double)
orbitsToColorMap colors orbs = M.fromList (concat $ zipWith (\rel c -> map (,c) rel) (map relatorToList orbs) (cycle colors))
colorBij :: M.Map Name (Colour Double) -> Bij -> Bij
colorBij colors = map colorBij'
where
colorBij' bij = bij & bijStyle .~ \n -> maybe id lc (M.lookup n colors) ((bij ^. bijStyle) n)
a0, b0, a1, b1 :: ASet
a0 = nset 3 yellow
b0 = nset 3 blue
a1 = nset 2 green
b1 = nset 2 red
bc0, bc1, bc01 :: BComplex
bc0 = [a0] .- bij0 -.. [b0]
bc1 = [a1] .- bij1 -.. [b1]
bc01 = [a0,a1] .- bij01 -.. [b0,b1]
bij0, bij1 :: Bij
bij0 = [mkABij a0 b0 ((`mod` 3) . succ . succ)]
bij1 = [mkABij a1 b1 id]
names01, names02 :: [Name]
names01 = 'X' |> disjointly concat [head bij0^.bijDomain, head bij1^.bijDomain]
names02 = 'Y' |> (('a' |@@ [1,2]) ++ ('b' |@@ [0,1]) ++ ('a' |@@ [0]))
bij01 :: Bij
bij01 = []
|
d5e5679739216d6180c92e19b9adefe9a1cc2baedcd60c04de46b5b2cddcbf76 | williamleferrand/aws | creds.ml | type t = {
aws_access_key_id : string ;
aws_secret_access_key : string ;
}
| null | https://raw.githubusercontent.com/williamleferrand/aws/d591ef0a2b89082caac6ddd6850b2d8b7824e577/src/creds.ml | ocaml | type t = {
aws_access_key_id : string ;
aws_secret_access_key : string ;
}
| |
8c35c89a4ecd75074066f23edb0b68cf112544a6522dde3b28e19f1169c96b7f | flora-pm/flora-server | Requirement.hs | module Flora.Model.Requirement where
import Crypto.Hash.MD5 qualified as MD5
import Data.Aeson (FromJSON, ToJSON)
import Data.Data
import Data.Foldable (foldl')
import Data.Map.Strict qualified as Map
import Data.Text (Text)
import Data.Text.Display
import Data.UUID (UUID, fromByteString)
import Data.Vector (Vector)
import Database.PostgreSQL.Entity (Entity)
import Database.PostgreSQL.Entity.Types (GenericEntity, TableName)
import Database.PostgreSQL.Simple (ToRow)
import Database.PostgreSQL.Simple.FromField
( FromField
, fromField
, fromJSONField
)
import Database.PostgreSQL.Simple.FromRow (FromRow (..))
import Database.PostgreSQL.Simple.ToField (ToField, toField, toJSONField)
import GHC.Generics (Generic)
import Control.DeepSeq
import Data.ByteString.Lazy (fromStrict)
import Data.Maybe (fromJust)
import Data.Text.Encoding (encodeUtf8)
import Distribution.SPDX.License qualified as SPDX
import Distribution.Types.Version (Version)
import Flora.Model.Package.Component
import Flora.Model.Package.Types
-- | Unique database identifier of a 'Requirement' row, carried as a
-- UUID.  All field/JSON instances are borrowed from 'UUID' via
-- @deriving via@; 'Display' falls back to the 'Show' output.
newtype RequirementId = RequirementId {getRequirementId :: UUID}
  deriving
    (Eq, Show, FromField, ToField, FromJSON, ToJSON, NFData)
    via UUID
  deriving (Display) via ShowInstance UUID
-- | Derive a stable 'RequirementId' from the owning component and the
-- depended-on package: MD5-hash their rendered ids and reinterpret the
-- digest as a UUID.  The same pair always yields the same id.
deterministicRequirementId :: ComponentId -> PackageId -> RequirementId
deterministicRequirementId componentId packageId =
  RequirementId uuid
  where
    -- MD5 emits exactly 16 bytes — precisely the size a UUID needs —
    -- so 'fromByteString' cannot return Nothing here.
    uuid = fromJust (fromByteString (fromStrict digest))
    digest = MD5.hash (encodeUtf8 seed)
    seed = display componentId <> display packageId
-- | One dependency edge in the database: a package component that
-- depends on a package, plus the version-range expression and any
-- extra metadata.  Rows live in the @requirements@ table (see the
-- 'Entity' deriving below).
data Requirement = Requirement
  { requirementId :: RequirementId
  -- ^ Unique identifier of this requirement in the database
  , packageComponentId :: ComponentId
  -- ^ Package component that depends on this requirement
  , packageId :: PackageId
  -- ^ Package that is being depended on
  , requirement :: Text
  -- ^ The human-readable version range expression of this requirement
  , metadata :: RequirementMetadata
  -- ^ Additional metadata, like flags
  }
  deriving stock (Eq, Show, Generic)
  deriving anyclass (FromRow, ToRow, NFData, FromJSON, ToJSON)
  deriving
    (Entity)
    via (GenericEntity '[TableName "requirements"] Requirement)
  deriving
    (Display)
    via ShowInstance Requirement
-- | Extra, loosely structured information attached to a requirement.
-- Round-trips through the database as JSON (see the 'FromField' and
-- 'ToField' instances below).
data RequirementMetadata = RequirementMetadata
  { flag :: Maybe Text
  -- ^ Flag under which this dependency applies, if any — presumably a
  -- Cabal flag name; confirm against the loader
  }
  deriving stock (Eq, Show, Generic, Typeable)
  deriving anyclass (ToJSON, FromJSON, NFData)
-- Decode 'RequirementMetadata' from a JSON(B) column.
instance FromField RequirementMetadata where
  fromField = fromJSONField
-- Encode 'RequirementMetadata' as its JSON representation.
instance ToField RequirementMetadata where
  toField = toJSONField
-- | This datatype holds information about the latest version of a
-- dependency: where it lives (namespace + name), the requirement's
-- version-range expression, and the latest release's version,
-- synopsis and licence.
data DependencyInfo = DependencyInfo
  { namespace :: Namespace
  , name :: PackageName
  , requirement :: Text
  , latestVersion :: Version
  , latestSynopsis :: Text
  , latestLicense :: SPDX.License
  }
  deriving stock (Eq, Show, Generic)
  deriving anyclass (FromRow, NFData)
-- | Data Access Object for component dependencies read from the db.
-- Row shape: the component's identity (type + name) followed by the
-- same fields as 'DependencyInfo'; 'toComponentDependencies' splits a
-- row into a 'CanonicalComponent' key and a 'DependencyInfo' value
-- via RecordWildCards.
data ComponentDependency' = ComponentDependency'
  { componentType :: ComponentType
  , componentName :: Text
  , namespace :: Namespace
  , name :: PackageName
  , requirement :: Text
  , latestVersion :: Version
  , latestSynopsis :: Text
  , latestLicense :: SPDX.License
  }
  deriving stock (Eq, Show, Generic)
  deriving anyclass (FromRow, NFData)
-- | Map from a component to the dependencies it declares.
type ComponentDependencies = Map.Map CanonicalComponent (Vector DependencyInfo)
-- | Group a flat result set of per-component dependency rows into a
-- map keyed by the component that declares them.
toComponentDependencies :: Vector ComponentDependency' -> ComponentDependencies
toComponentDependencies = foldl' insertRow Map.empty
  where
    -- RecordWildCards splits each row into its component key and its
    -- dependency payload; rows sharing a key are appended with (<>).
    insertRow acc ComponentDependency'{..} =
      Map.insertWith (<>) (CanonicalComponent{..}) (pure DependencyInfo{..}) acc
| null | https://raw.githubusercontent.com/flora-pm/flora-server/fe2d9fec16d00dea65ff03f47f3d94fa51e23c3d/src/core/Flora/Model/Requirement.hs | haskell | ^ Unique identifier to this requirement in the database
^ Package component that depends on this requirement
^ Package that is being depended on
^ The human-readable version range expression of this requirement
^ Additional metadata, like flags
| This datatype holds information about the latest version of a dependency
| Data Access Object for component dependencies to read from db
| Map of components to its dependencies | module Flora.Model.Requirement where
import Crypto.Hash.MD5 qualified as MD5
import Data.Aeson (FromJSON, ToJSON)
import Data.Data
import Data.Foldable (foldl')
import Data.Map.Strict qualified as Map
import Data.Text (Text)
import Data.Text.Display
import Data.UUID (UUID, fromByteString)
import Data.Vector (Vector)
import Database.PostgreSQL.Entity (Entity)
import Database.PostgreSQL.Entity.Types (GenericEntity, TableName)
import Database.PostgreSQL.Simple (ToRow)
import Database.PostgreSQL.Simple.FromField
( FromField
, fromField
, fromJSONField
)
import Database.PostgreSQL.Simple.FromRow (FromRow (..))
import Database.PostgreSQL.Simple.ToField (ToField, toField, toJSONField)
import GHC.Generics (Generic)
import Control.DeepSeq
import Data.ByteString.Lazy (fromStrict)
import Data.Maybe (fromJust)
import Data.Text.Encoding (encodeUtf8)
import Distribution.SPDX.License qualified as SPDX
import Distribution.Types.Version (Version)
import Flora.Model.Package.Component
import Flora.Model.Package.Types
-- | Unique database identifier of a 'Requirement' row, carried as a
-- UUID.  All field/JSON instances are borrowed from 'UUID' via
-- @deriving via@; 'Display' falls back to the 'Show' output.
newtype RequirementId = RequirementId {getRequirementId :: UUID}
  deriving
    (Eq, Show, FromField, ToField, FromJSON, ToJSON, NFData)
    via UUID
  deriving (Display) via ShowInstance UUID
-- | Deterministically derive a 'RequirementId': hash the rendered
-- component id and package id with MD5 and read the 16-byte digest
-- back as a UUID (MD5's output size matches a UUID exactly, so the
-- conversion cannot fail and 'fromJust' is safe).
deterministicRequirementId :: ComponentId -> PackageId -> RequirementId
deterministicRequirementId componentId packageId =
  let seed = display componentId <> display packageId
      digest = fromStrict (MD5.hash (encodeUtf8 seed))
   in RequirementId (fromJust (fromByteString digest))
-- | One dependency edge in the database: a package component that
-- depends on a package, plus the version-range expression and any
-- extra metadata.  Rows live in the @requirements@ table (see the
-- 'Entity' deriving below).
data Requirement = Requirement
  { requirementId :: RequirementId
  -- ^ Unique identifier of this requirement in the database
  , packageComponentId :: ComponentId
  -- ^ Package component that declares this requirement
  , packageId :: PackageId
  -- ^ Package that is being depended on
  , requirement :: Text
  -- ^ Human-readable version range expression
  , metadata :: RequirementMetadata
  -- ^ Additional metadata, e.g. the flag guarding the dependency
  }
  deriving stock (Eq, Show, Generic)
  deriving anyclass (FromRow, ToRow, NFData, FromJSON, ToJSON)
  deriving
    (Entity)
    via (GenericEntity '[TableName "requirements"] Requirement)
  deriving
    (Display)
    via ShowInstance Requirement
-- | Extra, loosely structured information attached to a requirement.
-- Round-trips through the database as JSON (see the 'FromField' and
-- 'ToField' instances below).
data RequirementMetadata = RequirementMetadata
  { flag :: Maybe Text
  -- ^ Flag under which this dependency applies, if any — presumably a
  -- Cabal flag name; confirm against the loader
  }
  deriving stock (Eq, Show, Generic, Typeable)
  deriving anyclass (ToJSON, FromJSON, NFData)
-- Decode 'RequirementMetadata' from a JSON(B) column.
instance FromField RequirementMetadata where
  fromField = fromJSONField
-- Encode 'RequirementMetadata' as its JSON representation.
instance ToField RequirementMetadata where
  toField = toJSONField
-- | Information about a dependency together with the latest release of
-- the depended-on package (version, synopsis, licence).
data DependencyInfo = DependencyInfo
  { namespace :: Namespace
  , name :: PackageName
  , requirement :: Text
  , latestVersion :: Version
  , latestSynopsis :: Text
  , latestLicense :: SPDX.License
  }
  deriving stock (Eq, Show, Generic)
  deriving anyclass (FromRow, NFData)
-- | Data Access Object for component dependencies read from the db.
-- Row shape: the component's identity (type + name) followed by the
-- same fields as 'DependencyInfo'; 'toComponentDependencies' splits a
-- row into a 'CanonicalComponent' key and a 'DependencyInfo' value
-- via RecordWildCards.
data ComponentDependency' = ComponentDependency'
  { componentType :: ComponentType
  , componentName :: Text
  , namespace :: Namespace
  , name :: PackageName
  , requirement :: Text
  , latestVersion :: Version
  , latestSynopsis :: Text
  , latestLicense :: SPDX.License
  }
  deriving stock (Eq, Show, Generic)
  deriving anyclass (FromRow, NFData)
-- | Map from a component to the dependencies it declares.
type ComponentDependencies = Map.Map CanonicalComponent (Vector DependencyInfo)
-- | Fold a flat vector of dependency rows into a map keyed by the
-- component that declares them.
toComponentDependencies :: Vector ComponentDependency' -> ComponentDependencies
toComponentDependencies rows = foldl' step Map.empty rows
  where
    step m ComponentDependency'{..} =
      -- RecordWildCards splits the row into the CanonicalComponent key
      -- and the DependencyInfo payload; (<>) appends same-key rows.
      Map.insertWith (<>) (CanonicalComponent{..}) (pure DependencyInfo{..}) m
|
d8a6db91bc9a7f8c64442a69c46fe8f81e6d17c3c256b41608a9a25c4af0525a | cfpb/qu | aggregation.clj | (ns ^:integration integration.test.aggregation
(:require [clojure.test :refer :all]
[qu.data.aggregation :refer :all]
[qu.test-util :refer :all]
[qu.data :as data]
[qu.data.compression :refer [compress-where field-zip-fn field-unzip-fn]]
[qu.loader :as loader]))
;; One-time fixture for the whole namespace: provisions the
;; "integration_test" dataset (mongo-setup-fn presumably seeds a
;; MongoDB instance — confirm in qu.test-util).
(use-fixtures :once (mongo-setup-fn "integration_test"))
(deftest test-agg-query
  (testing "it generates the appropriate agg-query"
    ;; Build a max-per-state aggregation over the incomes slice and
    ;; compare the generated Mongo pipeline stage by stage.
    (let [metadata  (data/get-metadata "integration_test")
          slicedef  (get-in metadata [:slices :incomes])
          agg-query (generate-agg-query {:dataset "integration_test"
                                         :from "incomes"
                                         :to "test1"
                                         :group [:state_abbr]
                                         :aggregations {:max_tax_returns ["max" "tax_returns"]}
                                         :slicedef slicedef})
          ;; zip-field maps a logical field name to its compressed
          ;; column name as stored in Mongo.
          zip-field (comp name (field-zip-fn slicedef))]
      (is (= agg-query
             [{"$group" {:_id {:state_abbr (str "$" (zip-field "state_abbr"))}
                         :max_tax_returns {"$max" (str "$" (zip-field "tax_returns"))}}}
              {"$project" {:_id 0
                           :state_abbr "$_id.state_abbr"
                           :max_tax_returns 1}}
              {"$out" "test1"}])))))
;; (run-tests)
| null | https://raw.githubusercontent.com/cfpb/qu/f460d9ab2f05ac22f6d68a98a9641daf0f7c7ba4/test/integration/test/aggregation.clj | clojure | (run-tests) | (ns ^:integration integration.test.aggregation
(:require [clojure.test :refer :all]
[qu.data.aggregation :refer :all]
[qu.test-util :refer :all]
[qu.data :as data]
[qu.data.compression :refer [compress-where field-zip-fn field-unzip-fn]]
[qu.loader :as loader]))
;; One-time fixture for the whole namespace: provisions the
;; "integration_test" dataset (mongo-setup-fn presumably seeds a
;; MongoDB instance — confirm in qu.test-util).
(use-fixtures :once (mongo-setup-fn "integration_test"))
(deftest test-agg-query
  ;; Exercises qu.data.aggregation/generate-agg-query against the
  ;; dataset loaded by the namespace fixture.
  (testing "it generates the appropriate agg-query"
    (let [metadata (data/get-metadata "integration_test")
          slicedef (get-in metadata [:slices :incomes])]
      ;; Ask for the maximum of tax_returns grouped by state_abbr,
      ;; written out to the "test1" collection.
      (let [agg-query (generate-agg-query {:dataset "integration_test"
                                           :from "incomes"
                                           :to "test1"
                                           :group [:state_abbr]
                                           :aggregations {:max_tax_returns ["max" "tax_returns"]}
                                           :slicedef slicedef})
            ;; z maps a field name to its compressed column name
            ;; (see qu.data.compression/field-zip-fn).
            z (comp name (field-zip-fn slicedef))]
        (is (= agg-query
               [{"$group" {:_id {:state_abbr (str "$" (z "state_abbr"))}
                           :max_tax_returns {"$max" (str "$" (z "tax_returns"))}}}
                {"$project" {:_id 0
                             :state_abbr "$_id.state_abbr"
                             :max_tax_returns 1}}
                {"$out" "test1"}]))))))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.