Copyright | (c) 2023 Input Output Global Inc. (IOG) (c) 2023-2025 INTERSECT |
---|---|
License | Apache-2.0 |
Stability | experimental |
Portability | portable |
Safe Haskell | Safe-Inferred |
Language | GHC2021 |
Database.LSMTree
Description
Synopsis
- type IOLike m = (MonadAsync m, MonadMVar m, MonadThrow m, MonadThrow (STM m), MonadCatch m, MonadMask m, PrimMonad m, MonadST m)
- data Session m
- withSession :: forall m h a. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> FsPath -> (Session m -> m a) -> m a
- withSessionIO :: Tracer IO LSMTreeTrace -> FilePath -> (Session IO -> IO a) -> IO a
- openSession :: forall m h. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> FsPath -> m (Session m)
- openSessionIO :: Tracer IO LSMTreeTrace -> FilePath -> IO (Session IO)
- closeSession :: forall m. IOLike m => Session m -> m ()
- data Table m k v b
- withTable :: forall m k v b a. IOLike m => Session m -> (Table m k v b -> m a) -> m a
- withTableWith :: forall m k v b a. IOLike m => TableConfig -> Session m -> (Table m k v b -> m a) -> m a
- newTable :: forall m k v b. IOLike m => Session m -> m (Table m k v b)
- newTableWith :: forall m k v b. IOLike m => TableConfig -> Session m -> m (Table m k v b)
- closeTable :: forall m k v b. IOLike m => Table m k v b -> m ()
- member :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m Bool
- members :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector Bool)
- data LookupResult v b
- = NotFound
- | Found !v
- | FoundWithBlob !v !b
- getValue :: LookupResult v b -> Maybe v
- getBlob :: LookupResult v b -> Maybe b
- lookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m (LookupResult v (BlobRef m b))
- lookups :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector (LookupResult v (BlobRef m b)))
- data Entry k v b
- = Entry !k !v
- | EntryWithBlob !k !v !b
- rangeLookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Range k -> m (Vector (Entry k v (BlobRef m b)))
- insert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> Maybe b -> m ()
- inserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v, Maybe b) -> m ()
- upsert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> m ()
- upserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v) -> m ()
- delete :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> m ()
- deletes :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector k -> m ()
- data Update v b
- update :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> Update v b -> m ()
- updates :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, Update v b) -> m ()
- withDuplicate :: forall m k v b a. IOLike m => Table m k v b -> (Table m k v b -> m a) -> m a
- duplicate :: forall m k v b. IOLike m => Table m k v b -> m (Table m k v b)
- withUnion :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a
- withUnions :: forall m k v b a. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a
- union :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> m (Table m k v b)
- unions :: forall m k v b. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> m (Table m k v b)
- withIncrementalUnion :: forall m k v b a. IOLike m => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a
- withIncrementalUnions :: forall m k v b a. IOLike m => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a
- incrementalUnion :: forall m k v b. IOLike m => Table m k v b -> Table m k v b -> m (Table m k v b)
- incrementalUnions :: forall m k v b. IOLike m => NonEmpty (Table m k v b) -> m (Table m k v b)
- remainingUnionDebt :: forall m k v b. IOLike m => Table m k v b -> m UnionDebt
- supplyUnionCredits :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> UnionCredits -> m UnionCredits
- data BlobRef m b
- retrieveBlob :: forall m b. (IOLike m, SerialiseValue b) => Session m -> BlobRef m b -> m b
- retrieveBlobs :: forall m b. (IOLike m, SerialiseValue b) => Session m -> Vector (BlobRef m b) -> m (Vector b)
- data Cursor m k v b
- withCursor :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> (Cursor m k v b -> m a) -> m a
- withCursorAtOffset :: forall m k v b a. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> (Cursor m k v b -> m a) -> m a
- newCursor :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> m (Cursor m k v b)
- newCursorAtOffset :: forall m k v b. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> m (Cursor m k v b)
- closeCursor :: forall m k v b. IOLike m => Cursor m k v b -> m ()
- next :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Cursor m k v b -> m (Maybe (Entry k v (BlobRef m b)))
- take :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b)))
- takeWhile :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> (k -> Bool) -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b)))
- saveSnapshot :: forall m k v b. IOLike m => SnapshotName -> SnapshotLabel -> Table m k v b -> m ()
- withTableFromSnapshot :: forall m k v b a. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a
- withTableFromSnapshotWith :: forall m k v b a. IOLike m => ResolveValue v => OverrideDiskCachePolicy -> Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a
- openTableFromSnapshot :: forall m k v b. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b)
- openTableFromSnapshotWith :: forall m k v b. IOLike m => ResolveValue v => OverrideDiskCachePolicy -> Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b)
- doesSnapshotExist :: forall m. IOLike m => Session m -> SnapshotName -> m Bool
- deleteSnapshot :: forall m. IOLike m => Session m -> SnapshotName -> m ()
- listSnapshots :: forall m. IOLike m => Session m -> m [SnapshotName]
- data SnapshotName
- isValidSnapshotName :: String -> Bool
- toSnapshotName :: String -> SnapshotName
- newtype SnapshotLabel = SnapshotLabel Text
- data TableConfig
- defaultTableConfig :: TableConfig
- data MergePolicy = LazyLevelling
- data MergeSchedule
- data SizeRatio = Four
- data WriteBufferAlloc = AllocNumEntries !Int
- data BloomFilterAlloc
- data FencePointerIndexType
- data DiskCachePolicy
- data OverrideDiskCachePolicy
- data Range k
- = FromToExcluding k k
- | FromToIncluding k k
- newtype UnionCredits = UnionCredits Int
- newtype UnionDebt = UnionDebt Int
- newtype RawBytes = RawBytes (Vector Word8)
- class SerialiseKey k where
- serialiseKey :: k -> RawBytes
- deserialiseKey :: RawBytes -> k
- class SerialiseKey k => SerialiseKeyOrderPreserving k
- class SerialiseValue v where
- serialiseValue :: v -> RawBytes
- deserialiseValue :: RawBytes -> v
- serialiseKeyIdentity :: (Eq k, SerialiseKey k) => k -> Bool
- serialiseKeyIdentityUpToSlicing :: (Eq k, SerialiseKey k) => RawBytes -> k -> RawBytes -> Bool
- serialiseKeyPreservesOrdering :: (Ord k, SerialiseKey k) => k -> k -> Bool
- serialiseKeyMinimalSize :: SerialiseKey k => k -> Bool
- serialiseValueIdentity :: (Eq v, SerialiseValue v) => v -> Bool
- serialiseValueIdentityUpToSlicing :: (Eq v, SerialiseValue v) => RawBytes -> v -> RawBytes -> Bool
- packSlice :: RawBytes -> RawBytes -> RawBytes -> RawBytes
- class ResolveValue v where
- newtype ResolveViaSemigroup v = ResolveViaSemigroup v
- newtype ResolveAsFirst v = ResolveAsFirst {
- unResolveAsFirst :: v }
- resolveCompatibility :: (SerialiseValue v, ResolveValue v) => v -> v -> Bool
- resolveValidOutput :: (SerialiseValue v, ResolveValue v, NFData v) => v -> v -> Bool
- resolveAssociativity :: (SerialiseValue v, ResolveValue v) => v -> v -> v -> Bool
- data SessionDirDoesNotExistError = ErrSessionDirDoesNotExist !FsErrorPath
- data SessionDirLockedError = ErrSessionDirLocked !FsErrorPath
- data SessionDirCorruptedError = ErrSessionDirCorrupted !FsErrorPath
- data SessionClosedError = ErrSessionClosed
- data TableClosedError = ErrTableClosed
- data TableCorruptedError = ErrLookupByteCountDiscrepancy !ByteCount !ByteCount
- data TableTooLargeError = ErrTableTooLarge
- data TableUnionNotCompatibleError
- data SnapshotExistsError = ErrSnapshotExists !SnapshotName
- data SnapshotDoesNotExistError = ErrSnapshotDoesNotExist !SnapshotName
- data SnapshotCorruptedError = ErrSnapshotCorrupted !SnapshotName !FileCorruptedError
- data SnapshotNotCompatibleError = ErrSnapshotWrongLabel !SnapshotName !SnapshotLabel !SnapshotLabel
- data BlobRefInvalidError = ErrBlobRefInvalid !Int
- data CursorClosedError = ErrCursorClosed
- data InvalidSnapshotNameError = ErrInvalidSnapshotName !String
- data Tracer (m :: Type -> Type) a
- data LSMTreeTrace
- = TraceOpenSession FsPath
- | TraceNewSession
- | TraceRestoreSession
- | TraceCloseSession
- | TraceNewTable
- | TraceOpenTableFromSnapshot SnapshotName OverrideDiskCachePolicy
- | TraceTable TableId TableTrace
- | TraceDeleteSnapshot SnapshotName
- | TraceListSnapshots
- | TraceCursor CursorId CursorTrace
- | TraceUnions (NonEmpty TableId)
- data TableTrace
- data CursorTrace
- data MergeTrace
- = TraceFlushWriteBuffer NumEntries RunNumber RunParams
- | TraceAddLevel
- | TraceAddRun RunNumber (Vector RunNumber)
- | TraceNewMerge (Vector NumEntries) RunNumber RunParams MergePolicyForLevel LevelMergeType
- | TraceNewMergeSingleRun NumEntries RunNumber
- | TraceCompletedMerge NumEntries RunNumber
- | TraceExpectCompletedMerge RunNumber
- newtype CursorId = CursorId Int
- newtype TableId = TableId Int
- data AtLevel a = AtLevel LevelNo a
- newtype LevelNo = LevelNo Int
- newtype NumEntries = NumEntries Int
- newtype RunNumber = RunNumber Int
- data MergePolicyForLevel
- data LevelMergeType
- data RunParams = RunParams {}
- data RunDataCaching
- data IndexType
Usage Notes
This section focuses on the differences between the full API as defined in this module and the simple API as defined in Database.LSMTree.Simple. It assumes that the reader is familiar with Usage Notes for the simple API, which discusses crucial topics such as Resource Management, Concurrency, ACID properties, and Sharing.
Real and Simulated IO
type IOLike m = (MonadAsync m, MonadMVar m, MonadThrow m, MonadThrow (STM m), MonadCatch m, MonadMask m, PrimMonad m, MonadST m) Source #
Examples
The examples in this module use the preamble described in this section, which does three things:
- It imports this module qualified, as intended, as well as any other relevant modules.
- It defines types for keys, values, and BLOBs.
- It defines a helper function that runs examples with access to an open session and fresh table.
Importing Database.LSMTree
This module is intended to be imported qualified, to avoid name clashes with Prelude functions.
>>>
import Database.LSMTree (BlobRef, Cursor, RawBytes, ResolveValue (..), SerialiseKey (..), SerialiseValue (..), Session, Table)
>>>
import qualified Database.LSMTree as LSMT
Defining key, value, and BLOB types
The examples in this module use the types Key
, Value
, and Blob
for keys, values and BLOBs.
>>>
import Data.ByteString (ByteString)
>>>
import Data.ByteString.Short (ShortByteString)
>>>
import Data.Proxy (Proxy)
>>>
import Data.String (IsString)
>>>
import Data.Word (Word64)
The type Key
is a newtype wrapper around Word64
.
The required instance of SerialiseKey
is derived by GeneralisedNewtypeDeriving
from the preexisting instance for Word64
.
>>>
:{
newtype Key = Key Word64 deriving stock (Eq, Ord, Show) deriving newtype (Num, SerialiseKey) :}
The type Value
is a newtype wrapper around ShortByteString
.
The required instance of SerialiseValue
is derived by GeneralisedNewtypeDeriving
from the preexisting instance for ShortByteString
.
>>>
:{
newtype Value = Value ShortByteString deriving stock (Eq, Show) deriving newtype (IsString, SerialiseValue) :}
The type Value
has an instance of ResolveValue
which appends the new value to the old value separated by a space.
It is sufficient to define either resolve
or resolveSerialised
,
as each can be defined in terms of the other and serialiseValue
/deserialiseValue
.
For optimal performance, you should always define resolveSerialised
manually.
NOTE:
The first argument of resolve
and resolveSerialised
is the new value and the second argument is the old value.
>>>
:{
instance ResolveValue Value where resolve :: Value -> Value -> Value resolve (Value new) (Value old) = Value (new <> " " <> old) resolveSerialised :: Proxy Value -> RawBytes -> RawBytes -> RawBytes resolveSerialised _ new old = new <> " " <> old :}
The type Blob
is a newtype wrapper around ByteString
.
The required instance of SerialiseValue
is derived by GeneralisedNewtypeDeriving
from the preexisting instance for ByteString
.
>>>
:{
newtype Blob = Blob ByteString deriving stock (Eq, Show) deriving newtype (IsString, SerialiseValue) :}
Defining a helper function to run examples
The examples in this module are wrapped in a call to runExample
,
which creates a temporary session directory and
runs the example with access to an open Session
and a fresh Table
.
>>>
import Control.Exception (bracket, bracket_)
>>>
import Data.Foldable (traverse_)
>>>
import qualified System.Directory as Dir
>>>
import System.FilePath ((</>))
>>>
import System.Process (getCurrentPid)
>>>
:{
runExample :: (Session IO -> Table IO Key Value Blob -> IO a) -> IO a runExample action = do tmpDir <- Dir.getTemporaryDirectory pid <- getCurrentPid let sessionDir = tmpDir </> "doctest_Database_LSMTree" </> show pid let createSessionDir = Dir.createDirectoryIfMissing True sessionDir let removeSessionDir = Dir.removeDirectoryRecursive sessionDir bracket_ createSessionDir removeSessionDir $ do LSMT.withSessionIO mempty sessionDir $ \session -> do LSMT.withTable session $ \table -> action session table :}
Sessions
A session stores context that is shared by multiple tables.
Each session is associated with one session directory where the files containing table data are stored. Each session locks its session directory. There can only be one active session for each session directory at a time. If a database must be accessed from multiple parts of a program, one session should be opened and shared between those parts of the program. Session directories cannot be shared between OS processes.
A session may contain multiple tables, which may each have a different configuration and different key, value, and BLOB types. Furthermore, sessions may contain both simple and full-featured tables.
Arguments
:: forall m h a. (IOLike m, Typeable h) | |
=> Tracer m LSMTreeTrace | |
-> HasFS m h | |
-> HasBlockIO m h | |
-> FsPath | The session directory. |
-> (Session m -> m a) | |
-> m a |
Run an action with access to a session opened from a session directory.
If the session directory is empty, a new session is created. Otherwise, the session directory is opened as an existing session.
If there are no open tables or cursors when the session terminates, then the disk I/O complexity of this operation is \(O(1)\).
Otherwise, closeTable
is called for each open table and closeCursor
is called for each open cursor.
Consequently, the worst-case disk I/O complexity of this operation depends on the merge policy of the open tables in the session.
The following assumes all tables in the session have the same merge policy:
LazyLevelling
- \(O(o \: T \log_T \frac{n}{B})\).
The variable \(o\) refers to the number of open tables and cursors in the session.
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of openSession
and closeSession
.
Throws the following exceptions:
SessionDirDoesNotExistError
- If the session directory does not exist.
SessionDirLockedError
- If the session directory is locked by another process.
SessionDirCorruptedError
- If the session directory is malformed.
withSessionIO :: Tracer IO LSMTreeTrace -> FilePath -> (Session IO -> IO a) -> IO a Source #
Variant of withSession
that is specialised to IO
using the real filesystem.
Arguments
:: forall m h. (IOLike m, Typeable h) | |
=> Tracer m LSMTreeTrace | |
-> HasFS m h | |
-> HasBlockIO m h | |
-> FsPath | The session directory. |
-> m (Session m) |
Open a session from a session directory.
If the session directory is empty, a new session is created. Otherwise, the session directory is opened as an existing session.
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: Sessions hold open resources and must be closed using closeSession
.
Throws the following exceptions:
SessionDirDoesNotExistError
- If the session directory does not exist.
SessionDirLockedError
- If the session directory is locked by another process.
SessionDirCorruptedError
- If the session directory is malformed.
openSessionIO :: Tracer IO LSMTreeTrace -> FilePath -> IO (Session IO) Source #
Variant of openSession
that is specialised to IO
using the real filesystem.
closeSession :: forall m. IOLike m => Session m -> m () Source #
Close a session.
If there are no open tables or cursors in the session, then the disk I/O complexity of this operation is \(O(1)\).
Otherwise, closeTable
is called for each open table and closeCursor
is called for each open cursor.
Consequently, the worst-case disk I/O complexity of this operation depends on the merge policy of the tables in the session.
The following assumes all tables in the session have the same merge policy:
LazyLevelling
- \(O(o \: T \log_T \frac{n}{B})\).
The variable \(o\) refers to the number of open tables and cursors in the session.
Closing is idempotent, i.e., closing a closed session does nothing. All other operations on a closed session will throw an exception.
Tables
A table is a handle to an individual LSM-tree key/value store with both in-memory and on-disk parts.
Warning: Tables are ephemeral. Once you close a table, its data is lost forever. To persist tables, use snapshots.
withTable :: forall m k v b a. IOLike m => Session m -> (Table m k v b -> m a) -> m a Source #
Run an action with access to an empty table.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of newTable
and closeTable
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
withTableWith :: forall m k v b a. IOLike m => TableConfig -> Session m -> (Table m k v b -> m a) -> m a Source #
Variant of withTable
that accepts table configuration.
newTable :: forall m k v b. IOLike m => Session m -> m (Table m k v b) Source #
Create an empty table.
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: Tables hold open resources and must be closed using closeTable
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
newTableWith :: forall m k v b. IOLike m => TableConfig -> Session m -> m (Table m k v b) Source #
Variant of newTable
that accepts table configuration.
closeTable :: forall m k v b. IOLike m => Table m k v b -> m () Source #
Close a table.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
Closing is idempotent, i.e., closing a closed table does nothing. All other operations on a closed table will throw an exception.
Warning: Tables are ephemeral. Once you close a table, its data is lost forever. To persist tables, use snapshots.
Table Lookups
member :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m Bool Source #
Check if the key is a member of the table.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing print =<< LSMT.member table 0 :} True
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
Membership tests can be performed concurrently from multiple Haskell threads.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
TableCorruptedError
- If the table data is corrupted.
members :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector Bool) Source #
Variant of member
for batch membership tests.
The batch of keys corresponds in-order to the batch of results.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(b \: T \log_T \frac{n}{B})\).
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
members table keys = traverse (member table) keys
data LookupResult v b Source #
Constructors
NotFound | |
Found !v | |
FoundWithBlob !v !b |
Instances
getValue :: LookupResult v b -> Maybe v Source #
Get the field of type v
from a LookupResult v b, if any.
getBlob :: LookupResult v b -> Maybe b Source #
Get the field of type b
from a LookupResult v b, if any.
The following property holds:
isJust (getBlob result) <= isJust (getValue result)
lookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m (LookupResult v (BlobRef m b)) Source #
Look up the value associated with a key.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing print =<< LSMT.lookup table 0 :} Found (Value "Hello")
If the key is not associated with any value, lookup
returns NotFound
.
>>>
:{
runExample $ \session table -> do LSMT.lookup table 0 :} NotFound
If the key has an associated BLOB, the result contains a BlobRef
.
The full BLOB can be retrieved by passing that BlobRef
to retrieveBlob
.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} FoundWithBlob (Value "Hello") (Blob "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
Lookups can be performed concurrently from multiple Haskell threads.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
TableCorruptedError
- If the table data is corrupted.
lookups :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector (LookupResult v (BlobRef m b))) Source #
Variant of lookup
for batch lookups.
The batch of keys corresponds in-order to the batch of results.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(b \: T \log_T \frac{n}{B})\).
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
lookups table keys = traverse (lookup table) keys
Constructors
Entry !k !v | |
EntryWithBlob !k !v !b |
Instances
Bifunctor (Entry k) Source # | |
Foldable (Entry k v) Source # | |
Defined in Database.LSMTree Methods fold :: Monoid m => Entry k v m -> m # foldMap :: Monoid m => (a -> m) -> Entry k v a -> m # foldMap' :: Monoid m => (a -> m) -> Entry k v a -> m # foldr :: (a -> b -> b) -> b -> Entry k v a -> b # foldr' :: (a -> b -> b) -> b -> Entry k v a -> b # foldl :: (b -> a -> b) -> b -> Entry k v a -> b # foldl' :: (b -> a -> b) -> b -> Entry k v a -> b # foldr1 :: (a -> a -> a) -> Entry k v a -> a # foldl1 :: (a -> a -> a) -> Entry k v a -> a # toList :: Entry k v a -> [a] # length :: Entry k v a -> Int # elem :: Eq a => a -> Entry k v a -> Bool # maximum :: Ord a => Entry k v a -> a # minimum :: Ord a => Entry k v a -> a # | |
Traversable (Entry k v) Source # | |
Defined in Database.LSMTree | |
Functor (Entry k v) Source # | |
(Show k, Show v, Show b) => Show (Entry k v b) Source # | |
(NFData k, NFData v, NFData b) => NFData (Entry k v b) Source # | |
Defined in Database.LSMTree | |
(Eq k, Eq v, Eq b) => Eq (Entry k v b) Source # | |
rangeLookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Range k -> m (Vector (Entry k v (BlobRef m b))) Source #
Look up a batch of values associated with keys in the given range.
The worst-case disk I/O complexity of this operation is \(O(T \log_T \frac{n}{B} + \frac{b}{P})\), where the variable \(b\) refers to the length of the output vector.
Range lookups can be performed concurrently from multiple Haskell threads.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
TableCorruptedError
- If the table data is corrupted.
Table Updates
insert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> Maybe b -> m () Source #
Insert associates the given value and BLOB with the given key in the table.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing print =<< LSMT.lookup table 0 :} Found (Value "Hello")
Insert may optionally associate a BLOB value with the given key.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} FoundWithBlob (Value "Hello") (Blob "World")
Insert overwrites any value and BLOB previously associated with the given key,
even if the given BLOB is Nothing
.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") LSMT.insert table 0 "Goodbye" Nothing print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} Found (Value "Goodbye")
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{n}{P})\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
inserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v, Maybe b) -> m () Source #
Variant of insert
for batch insertions.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
inserts table entries = traverse_ (uncurry $ insert table) entries
upsert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> m () Source #
If the given key is not a member of the table, upsert
associates the given value with the given key in the table.
Otherwise, upsert
updates the value associated with the given key by combining it with the given value using resolve
.
>>>
:{
runExample $ \session table -> do LSMT.upsert table 0 "Hello" LSMT.upsert table 0 "Goodbye" print =<< LSMT.lookup table 0 :} Found (Value "Goodbye Hello")
Warning: Upsert deletes any BLOB previously associated with the given key.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") LSMT.upsert table 0 "Goodbye" print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} Found (Value "Goodbye Hello")
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{n}{P})\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
The following property holds in the absence of races:
upsert table k v = do r <- lookup table k let v' = maybe v (resolve v) (getValue r) insert table k v' Nothing
upserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v) -> m () Source #
Variant of upsert
for batch upserts.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
upserts table entries = traverse_ (uncurry $ upsert table) entries
delete :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> m () Source #
Delete a key from the table.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.delete table 0 print =<< LSMT.lookup table 0 :} NotFound
If the key is not a member of the table, the table is left unchanged.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.delete table 1 print =<< LSMT.lookup table 0 :} Found (Value "Hello")
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{n}{P})\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
deletes :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector k -> m () Source #
Variant of delete
for batch deletions.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
deletes table keys = traverse_ (delete table) keys
update :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> Update v b -> m () Source #
Update generalises insert
, delete
, and upsert
.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{n}{P})\).
The following properties hold:
update table k (Insert v mb) = insert table k v mb
update table k Delete = delete table k
update table k (Upsert v) = upsert table k v
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
updates :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, Update v b) -> m () Source #
Variant of update
for batch updates.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling
/Incremental
- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
LazyLevelling
/OneShot
- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
updates table entries = traverse_ (uncurry $ update table) entries
Table Duplication
withDuplicate :: forall m k v b a. IOLike m => Table m k v b -> (Table m k v b -> m a) -> m a Source #
Run an action with access to the duplicate of a table.
The duplicate is an independent copy of the given table. Subsequent updates to the original table do not affect the duplicate, and vice versa.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.withDuplicate table $ \table' -> do print =<< LSMT.lookup table' 0 LSMT.insert table' 0 "Goodbye" Nothing print =<< LSMT.lookup table' 0 LSMT.lookup table 0 print =<< LSMT.lookup table 0 :} Found (Value "Hello") Found (Value "Goodbye") Found (Value "Hello")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of duplicate
and closeTable
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
duplicate :: forall m k v b. IOLike m => Table m k v b -> m (Table m k v b) Source #
Duplicate a table.
The duplicate is an independent copy of the given table. Subsequent updates to the original table do not affect the duplicate, and vice versa.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing bracket (LSMT.duplicate table) LSMT.closeTable $ \table' -> do print =<< LSMT.lookup table' 0 LSMT.insert table' 0 "Goodbye" Nothing print =<< LSMT.lookup table' 0 LSMT.lookup table 0 print =<< LSMT.lookup table 0 :} Found (Value "Hello") Found (Value "Goodbye") Found (Value "Hello")
The worst-case disk I/O complexity of this operation is \(O(0)\).
Warning: The duplicate must be independently closed using closeTable
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
Table Unions
withUnion :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a Source #
Run an action with access to a table that contains the union of the entries of the given tables.
>>>
:{
runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing LSMT.withUnion table1 table2 $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of union
and closeTable
.
Warning: Both input tables must be from the same Session
.
Warning: This is a relatively expensive operation that may take some time to complete.
See withIncrementalUnion
for an incremental alternative.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
TableUnionNotCompatibleError
- If both tables are not from the same
Session
.
withUnions :: forall m k v b a. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a Source #
Variant of withUnion
that takes any number of tables.
union :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> m (Table m k v b) Source #
Create a table that contains the union of the entries of the given tables.
If a key is a member of a single input table, then the same key and value occur in the output table.
Otherwise, the values for duplicate keys are combined using resolve
from left to right.
If the resolve
function behaves like const
, then this computes a left-biased union.
>>>
:{
runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.union table1 table2) LSMT.closeTable $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
Warning: The new table must be independently closed using closeTable
.
Warning: Both input tables must be from the same Session
.
Warning: This is a relatively expensive operation that may take some time to complete.
See incrementalUnion
for an incremental alternative.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
TableUnionNotCompatibleError
- If both tables are not from the same
Session
.
unions :: forall m k v b. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> m (Table m k v b) Source #
Variant of union
that takes any number of tables.
withIncrementalUnion :: forall m k v b a. IOLike m => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a Source #
Run an action with access to a table that incrementally computes the union of the given tables.
>>>
:{
runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing LSMT.withIncrementalUnion table1 table2 $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of incrementalUnion
and closeTable
.
The created table has a union debt which represents the amount of computation that remains. See remainingUnionDebt
.
The union debt can be paid off by supplying union credit which performs an amount of computation proportional to the amount of union credit. See supplyUnionCredits
.
While a table has unresolved union debt, operations may become more expensive by a factor that scales with the number of unresolved unions.
Warning: Both input tables must be from the same Session
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
TableUnionNotCompatibleError
- If both tables are not from the same
Session
.
withIncrementalUnions :: forall m k v b a. IOLike m => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a Source #
Variant of withIncrementalUnion
that takes any number of tables.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B} + b)\).
The variable \(b\) refers to the number of input tables.
incrementalUnion :: forall m k v b. IOLike m => Table m k v b -> Table m k v b -> m (Table m k v b) Source #
Create a table that incrementally computes the union of the given tables.
>>>
:{
runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.incrementalUnion table1 table2) LSMT.closeTable $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation is \(O(1)\).
The created table has a union debt which represents the amount of computation that remains. See remainingUnionDebt
.
The union debt can be paid off by supplying union credit which performs an amount of computation proportional to the amount of union credit. See supplyUnionCredits
.
While a table has unresolved union debt, operations may become more expensive by a factor that scales with the number of unresolved unions.
Warning: The new table must be independently closed using closeTable
.
Warning: Both input tables must be from the same Session
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
TableUnionNotCompatibleError
- If both tables are not from the same
Session
.
incrementalUnions :: forall m k v b. IOLike m => NonEmpty (Table m k v b) -> m (Table m k v b) Source #
Variant of incrementalUnion
for any number of tables.
The worst-case disk I/O complexity of this operation is \(O(b)\), where the variable \(b\) refers to the number of input tables.
remainingUnionDebt :: forall m k v b. IOLike m => Table m k v b -> m UnionDebt Source #
Get an upper bound for the amount of remaining union debt. This includes the union debt of any table that was part of the union's input.
>>>
:{
runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.incrementalUnion table1 table2) LSMT.closeTable $ \table3 -> do putStrLn . ("UnionDebt: "<>) . show =<< LSMT.remainingUnionDebt table3 :} UnionDebt: 4
The worst-case disk I/O complexity of this operation is \(O(0)\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
supplyUnionCredits :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> UnionCredits -> m UnionCredits Source #
Supply the given amount of union credits.
This reduces the union debt by at least the number of supplied union credits.
It is therefore advisable to query remainingUnionDebt
every once in a while to get an upper bound on the current debt.
This function returns any surplus of union credits as leftover credits when a union has finished. In particular, if the returned number of credits is positive, then the union is finished.
>>>
:{
runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.incrementalUnion table1 table2) LSMT.closeTable $ \table3 -> do putStrLn . ("UnionDebt: "<>) . show =<< LSMT.remainingUnionDebt table3 putStrLn . ("Leftovers: "<>) . show =<< LSMT.supplyUnionCredits table3 2 putStrLn . ("UnionDebt: "<>) . show =<< LSMT.remainingUnionDebt table3 putStrLn . ("Leftovers: "<>) . show =<< LSMT.supplyUnionCredits table3 4 :} UnionDebt: 4 Leftovers: 0 UnionDebt: 2 Leftovers: 3
NOTE:
The remainingUnionDebt
function gets an upper bound for the amount of remaining union debt.
In the example above, the second call to remainingUnionDebt
reports 2
, but the union debt is 1
.
Therefore, the second call to supplyUnionCredits
returns more leftovers than expected.
The worst-case disk I/O complexity of this operation is \(O(\frac{b}{P})\), where the variable \(b\) refers to the amount of credits supplied.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
Blob References
A blob reference is a reference to an on-disk blob.
Warning: A blob reference is not stable. Any operation that modifies the table, cursor, or session that corresponds to a blob reference may cause it to be invalidated.
The word "blob" in this type comes from the acronym Binary Large Object.
retrieveBlob :: forall m b. (IOLike m, SerialiseValue b) => Session m -> BlobRef m b -> m b Source #
Retrieve the blob value from a blob reference.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} FoundWithBlob (Value "Hello") (Blob "World")
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: A blob reference is not stable. Any operation that modifies the table, cursor, or session that corresponds to a blob reference may cause it to be invalidated.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
BlobRefInvalidError
- If the blob reference has been invalidated.
retrieveBlobs :: forall m b. (IOLike m, SerialiseValue b) => Session m -> Vector (BlobRef m b) -> m (Vector b) Source #
Variant of retrieveBlob
for batch retrieval.
The batch of blob references corresponds in-order to the batch of results.
The worst-case disk I/O complexity of this operation is \(O(b)\), where the variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
retrieveBlobs session blobRefs = traverse (retrieveBlob session) blobRefs
Cursors
A cursor is a stable read-only iterator for a table.
A cursor iterates over the entries in a table following the order of the serialised keys. After the cursor is created, updates to the referenced table do not affect the cursor.
The name of this type references database cursors, not, e.g., text editor cursors.
withCursor :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> (Cursor m k v b -> m a) -> m a Source #
Run an action with access to a cursor.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of newCursor
and closeCursor
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
withCursorAtOffset :: forall m k v b a. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> (Cursor m k v b -> m a) -> m a Source #
Variant of withCursor
that starts at a given key.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursorAtOffset table 1 $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 1) (Value "World")
newCursor :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> m (Cursor m k v b) Source #
Create a cursor for the given table.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing bracket (LSMT.newCursor table) LSMT.closeCursor $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
Warning: Cursors hold open resources and must be closed using closeCursor
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
newCursorAtOffset :: forall m k v b. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> m (Cursor m k v b) Source #
Variant of newCursor
that starts at a given key.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing bracket (LSMT.newCursorAtOffset table 1) LSMT.closeCursor $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 1) (Value "World")
closeCursor :: forall m k v b. IOLike m => Cursor m k v b -> m () Source #
Close a cursor.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
Closing is idempotent, i.e., closing a closed cursor does nothing. All other operations on a closed cursor will throw an exception.
next :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Cursor m k v b -> m (Maybe (Entry k v (BlobRef m b))) Source #
Read the next table entry from the cursor.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do print =<< LSMT.next cursor print =<< LSMT.next cursor print =<< LSMT.next cursor :} Just (Entry (Key 0) (Value "Hello")) Just (Entry (Key 1) (Value "World")) Nothing
The worst-case disk I/O complexity of this operation is \(O(\frac{1}{P})\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
CursorClosedError
- If the cursor is closed.
take :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b))) Source #
Read the next batch of table entries from the cursor.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{b}{P})\), where the variable \(b\) refers to the length of the output vector, which is at most equal to the given number. In practice, the length of the output vector is only less than the given number once the cursor reaches the end of the table.
The following property holds:
take n cursor = catMaybes <$> replicateM n (next cursor)
Throws the following exceptions:
SessionClosedError
- If the session is closed.
CursorClosedError
- If the cursor is closed.
takeWhile :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> (k -> Bool) -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b))) Source #
Variant of take
that accepts an additional predicate to determine whether or not to continue reading.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do traverse_ print =<< LSMT.takeWhile 32 (<1) cursor :} Entry (Key 0) (Value "Hello")
The worst-case disk I/O complexity of this operation is \(O(\frac{b}{P})\), where the variable \(b\) refers to the length of the output vector, which is at most equal to the given number. In practice, the length of the output vector is only less than the given number when the predicate returns false or the cursor reaches the end of the table.
The following properties hold:
takeWhile n (const True) cursor = take n cursor
takeWhile n (const False) cursor = pure empty
Throws the following exceptions:
SessionClosedError
- If the session is closed.
CursorClosedError
- If the cursor is closed.
Snapshots
saveSnapshot :: forall m k v b. IOLike m => SnapshotName -> SnapshotLabel -> Table m k v b -> m () Source #
Save the current state of the table to disk as a snapshot under the given
snapshot name. This is the only mechanism that persists a table. Each snapshot
must have a unique name, which may be used to restore the table from that snapshot
using openTableFromSnapshot
.
Saving a snapshot does not close the table.
Saving a snapshot is relatively cheap when compared to opening a snapshot. However, it is not so cheap that one should use it after every operation.
>>>
:{
runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table :}
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
SnapshotExistsError
- If a snapshot with the same name already exists.
withTableFromSnapshot :: forall m k v b a. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a Source #
Run an action with access to a table from a snapshot.
>>>
:{
runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Open snapshot LSMT.withTableFromSnapshot @_ @Key @Value @Blob session "example" "Key Value Blob" $ \table' -> do LSMT.withCursor table' $ \cursor -> traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of openTableFromSnapshot
and closeTable
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
SnapshotDoesNotExistError
- If no snapshot with the given name exists.
SnapshotCorruptedError
- If the snapshot data is corrupted.
SnapshotNotCompatibleError
- If the snapshot has a different label or is a different table type.
withTableFromSnapshotWith :: forall m k v b a. IOLike m => ResolveValue v => OverrideDiskCachePolicy -> Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a Source #
Variant of withTableFromSnapshot
that accepts table configuration overrides.
openTableFromSnapshot :: forall m k v b. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b) Source #
Open a table from a named snapshot.
>>>
:{
runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Open snapshot bracket (LSMT.openTableFromSnapshot @_ @Key @Value @Blob session "example" "Key Value Blob") LSMT.closeTable $ \table' -> do LSMT.withCursor table' $ \cursor -> traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
Warning: The new table must be independently closed using closeTable
.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
TableClosedError
- If the table is closed.
SnapshotDoesNotExistError
- If no snapshot with the given name exists.
SnapshotCorruptedError
- If the snapshot data is corrupted.
SnapshotNotCompatibleError
- If the snapshot has a different label or is a different table type.
openTableFromSnapshotWith :: forall m k v b. IOLike m => ResolveValue v => OverrideDiskCachePolicy -> Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b) Source #
Variant of openTableFromSnapshot
that accepts table configuration overrides.
doesSnapshotExist :: forall m. IOLike m => Session m -> SnapshotName -> m Bool Source #
Check if the named snapshot exists.
>>>
:{
runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Check snapshots print =<< doesSnapshotExist session "example" print =<< doesSnapshotExist session "this_snapshot_does_not_exist" :} True False
The worst-case disk I/O complexity of this operation is \(O(1)\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
deleteSnapshot :: forall m. IOLike m => Session m -> SnapshotName -> m () Source #
Delete the named snapshot.
>>>
:{
runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Delete snapshot LSMT.deleteSnapshot session "example" :}
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling
- \(O(T \log_T \frac{n}{B})\).
Throws the following exceptions:
SessionClosedError
- If the session is closed.
SnapshotDoesNotExistError
- If no snapshot with the given name exists.
listSnapshots :: forall m. IOLike m => Session m -> m [SnapshotName] Source #
List the names of all snapshots.
>>>
:{
runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- List snapshots traverse_ print =<< listSnapshots session :} "example"
The worst-case disk I/O complexity of this operation is \(O(s)\), where \(s\) refers to the number of snapshots in the session.
Throws the following exceptions:
SessionClosedError
- If the session is closed.
data SnapshotName Source #
Instances
IsString SnapshotName Source # | The given string must satisfy |
Defined in Database.LSMTree.Internal.Paths Methods fromString :: String -> SnapshotName # | |
Show SnapshotName Source # | |
Defined in Database.LSMTree.Internal.Paths Methods showsPrec :: Int -> SnapshotName -> ShowS # show :: SnapshotName -> String # showList :: [SnapshotName] -> ShowS # | |
Eq SnapshotName Source # | |
Defined in Database.LSMTree.Internal.Paths | |
Ord SnapshotName Source # | |
Defined in Database.LSMTree.Internal.Paths Methods compare :: SnapshotName -> SnapshotName -> Ordering # (<) :: SnapshotName -> SnapshotName -> Bool # (<=) :: SnapshotName -> SnapshotName -> Bool # (>) :: SnapshotName -> SnapshotName -> Bool # (>=) :: SnapshotName -> SnapshotName -> Bool # max :: SnapshotName -> SnapshotName -> SnapshotName # min :: SnapshotName -> SnapshotName -> SnapshotName # |
isValidSnapshotName :: String -> Bool Source #
Check if a String
would be a valid snapshot name.
Snapshot names consist of lowercase characters, digits, dashes -
,
and underscores _
, and must be between 1 and 64 characters long.
>>> isValidSnapshotName "main"
True
>>> isValidSnapshotName "temporary-123-test_"
True
>>> isValidSnapshotName "UPPER"
False
>>> isValidSnapshotName "dir/dot.exe"
False
>>> isValidSnapshotName ".."
False
>>> isValidSnapshotName "\\"
False
>>> isValidSnapshotName ""
False
>>> isValidSnapshotName (replicate 100 'a')
False
Snapshot names must be valid directory names on both POSIX and Windows. This rules out the following reserved file and directory names on Windows:
>>> isValidSnapshotName "con"
False
>>> isValidSnapshotName "prn"
False
>>> isValidSnapshotName "aux"
False
>>> isValidSnapshotName "nul"
False
>>> isValidSnapshotName "com1" -- "com2", "com3", etc.
False
>>> isValidSnapshotName "lpt1" -- "lpt2", "lpt3", etc.
False
See, e.g., the VBA docs for the "Bad file name or number" error.
toSnapshotName :: String -> SnapshotName Source #
Create snapshot name.
The given string must satisfy isValidSnapshotName
.
Throws the following exceptions:
InvalidSnapshotNameError
- If the given string is not a valid snapshot name.
newtype SnapshotLabel Source #
Custom, user-supplied text that is included in the metadata.
The main use case for a SnapshotLabel
is for the user to supply textual
information about the key/value/blob type for the table that corresponds to
the snapshot. This information is used to dynamically check that a snapshot
is opened at the correct key/value/blob type.
Constructors
SnapshotLabel Text |
Instances
IsString SnapshotLabel Source # | |
Defined in Database.LSMTree.Internal.Snapshot Methods fromString :: String -> SnapshotLabel # | |
Show SnapshotLabel Source # | |
Defined in Database.LSMTree.Internal.Snapshot Methods showsPrec :: Int -> SnapshotLabel -> ShowS # show :: SnapshotLabel -> String # showList :: [SnapshotLabel] -> ShowS # | |
NFData SnapshotLabel Source # | |
Defined in Database.LSMTree.Internal.Snapshot Methods rnf :: SnapshotLabel -> () # | |
Eq SnapshotLabel Source # | |
Defined in Database.LSMTree.Internal.Snapshot Methods (==) :: SnapshotLabel -> SnapshotLabel -> Bool # (/=) :: SnapshotLabel -> SnapshotLabel -> Bool # | |
DecodeVersioned SnapshotLabel Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s SnapshotLabel Source # | |
Encode SnapshotLabel Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: SnapshotLabel -> Encoding Source # |
Table Configuration
data TableConfig Source #
A collection of configuration parameters for tables, which can be used to tune the performance of the table.
To construct a TableConfig
, modify the defaultTableConfig
, which defines reasonable defaults for all parameters.
For a detailed discussion of fine-tuning the table configuration, see Fine-tuning Table Configuration.
confMergePolicy ::
MergePolicy
- The merge policy balances the performance of lookups against the performance of updates. Levelling favours lookups. Tiering favours updates. Lazy levelling strikes a middle ground between levelling and tiering, and moderately favours updates. This parameter is explicitly referenced in the documentation of those operations it affects.
confSizeRatio ::
SizeRatio
- The size ratio pushes the effects of the merge policy to the extreme. If the size ratio is higher, levelling favours lookups more, and tiering and lazy levelling favour updates more. This parameter is referred to as \(T\) in the disk I/O cost of operations.
confWriteBufferAlloc ::
WriteBufferAlloc
- The write buffer capacity balances the performance of lookups and updates against the in-memory size of the database. If the write buffer is larger, it takes up more memory, but lookups and updates are more efficient. This parameter is referred to as \(B\) in the disk I/O cost of operations. Irrespective of this parameter, the write buffer size cannot exceed 4GiB.
confMergeSchedule ::
MergeSchedule
- The merge schedule balances the performance of lookups and updates against the consistency of updates. The merge schedule does not affect the performance of table unions. With the one-shot merge schedule, lookups and updates are more efficient overall, but some updates may take much longer than others. With the incremental merge schedule, lookups and updates are less efficient overall, but each update does a similar amount of work. This parameter is explicitly referenced in the documentation of those operations it affects.
confBloomFilterAlloc ::
BloomFilterAlloc
- The Bloom filter size balances the performance of lookups against the in-memory size of the database. If the Bloom filters are larger, they take up more memory, but lookup operations are more efficient.
confFencePointerIndex ::
FencePointerIndexType
- The fence-pointer index type supports two types of indexes. The ordinary indexes are designed to work with any key. The compact indexes are optimised for the case where the keys in the database are uniformly distributed, e.g., when the keys are hashes.
confDiskCachePolicy ::
DiskCachePolicy
- The disk cache policy supports caching lookup operations using the OS page cache. Caching may improve the performance of lookups if database access follows certain patterns.
Instances
Show TableConfig Source # | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> TableConfig -> ShowS # show :: TableConfig -> String # showList :: [TableConfig] -> ShowS # | |
NFData TableConfig Source # | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: TableConfig -> () # | |
Eq TableConfig Source # | |
Defined in Database.LSMTree.Internal.Config | |
DecodeVersioned TableConfig Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s TableConfig Source # | |
Encode TableConfig Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: TableConfig -> Encoding Source # |
defaultTableConfig :: TableConfig Source #
The defaultTableConfig
defines reasonable defaults for all TableConfig
parameters.
>>> confMergePolicy defaultTableConfig
LazyLevelling

>>> confMergeSchedule defaultTableConfig
Incremental

>>> confSizeRatio defaultTableConfig
Four

>>> confWriteBufferAlloc defaultTableConfig
AllocNumEntries 20000

>>> confBloomFilterAlloc defaultTableConfig
AllocRequestFPR 1.0e-3

>>> confFencePointerIndex defaultTableConfig
OrdinaryIndex

>>> confDiskCachePolicy defaultTableConfig
DiskCacheAll
data MergePolicy Source #
The merge policy balances the performance of lookups against the performance of updates. Levelling favours lookups. Tiering favours updates. Lazy levelling strikes a middle ground between levelling and tiering, and moderately favours updates. This parameter is explicitly referenced in the documentation of those operations it affects.
NOTE: This package only supports lazy levelling.
For a detailed discussion of the merge policy, see Fine-tuning: Merge Policy, Size Ratio, and Write Buffer Size.
Constructors
LazyLevelling |
Instances
Show MergePolicy Source # | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> MergePolicy -> ShowS # show :: MergePolicy -> String # showList :: [MergePolicy] -> ShowS # | |
NFData MergePolicy Source # | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: MergePolicy -> () # | |
Eq MergePolicy Source # | |
Defined in Database.LSMTree.Internal.Config | |
DecodeVersioned MergePolicy Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s MergePolicy Source # | |
Encode MergePolicy Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: MergePolicy -> Encoding Source # |
data MergeSchedule Source #
The merge schedule balances the performance of lookups and updates against the consistency of updates. The merge schedule does not affect the performance of table unions. With the one-shot merge schedule, lookups and updates are more efficient overall, but some updates may take much longer than others. With the incremental merge schedule, lookups and updates are less efficient overall, but each update does a similar amount of work. This parameter is explicitly referenced in the documentation of those operations it affects.
For a detailed discussion of the effect of the merge schedule, see Fine-tuning: Merge Schedule.
Constructors
OneShot | The |
Incremental | The |
Instances
Show MergeSchedule Source # | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> MergeSchedule -> ShowS # show :: MergeSchedule -> String # showList :: [MergeSchedule] -> ShowS # | |
NFData MergeSchedule Source # | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: MergeSchedule -> () # | |
Eq MergeSchedule Source # | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: MergeSchedule -> MergeSchedule -> Bool # (/=) :: MergeSchedule -> MergeSchedule -> Bool # | |
DecodeVersioned MergeSchedule Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s MergeSchedule Source # | |
Encode MergeSchedule Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: MergeSchedule -> Encoding Source # |
The size ratio pushes the effects of the merge policy to the extreme. If the size ratio is higher, levelling favours lookups more, and tiering and lazy levelling favour updates more. This parameter is referred to as \(T\) in the disk I/O cost of operations.
NOTE: This package only supports a size ratio of four.
For a detailed discussion of the size ratio, see Fine-tuning: Merge Policy, Size Ratio, and Write Buffer Size.
Constructors
Four |
Instances
Show SizeRatio Source # | |
NFData SizeRatio Source # | |
Defined in Database.LSMTree.Internal.Config | |
Eq SizeRatio Source # | |
DecodeVersioned SizeRatio Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s SizeRatio Source # | |
Encode SizeRatio Source # | |
data WriteBufferAlloc Source #
The write buffer capacity balances the performance of lookups and updates against the in-memory size of the table. If the write buffer is larger, it takes up more memory, but lookups and updates are more efficient. Irrespective of this parameter, the write buffer size cannot exceed 4GiB.
For a detailed discussion of the size ratio, see Fine-tuning: Merge Policy, Size Ratio, and Write Buffer Size.
Constructors
AllocNumEntries !Int | Allocate space for the in-memory write buffer to fit the requested number of entries. This parameter is referred to as \(B\) in the disk I/O cost of operations. |
Instances
Show WriteBufferAlloc Source # | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> WriteBufferAlloc -> ShowS # show :: WriteBufferAlloc -> String # showList :: [WriteBufferAlloc] -> ShowS # | |
NFData WriteBufferAlloc Source # | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: WriteBufferAlloc -> () # | |
Eq WriteBufferAlloc Source # | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: WriteBufferAlloc -> WriteBufferAlloc -> Bool # (/=) :: WriteBufferAlloc -> WriteBufferAlloc -> Bool # | |
DecodeVersioned WriteBufferAlloc Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s WriteBufferAlloc Source # | |
Encode WriteBufferAlloc Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: WriteBufferAlloc -> Encoding Source # |
data BloomFilterAlloc Source #
The Bloom filter size balances the performance of lookups against the in-memory size of the table. If the Bloom filters are larger, they take up more memory, but lookup operations are more efficient.
For a detailed discussion of the Bloom filter size, see Fine-tuning: Bloom Filter Size.
Constructors
AllocFixed !Double | Allocate the requested number of bits per entry in the table. The value must be strictly positive, but fractional values are permitted. The recommended range is \([2, 24]\). |
AllocRequestFPR !Double | Allocate the required number of bits per entry to get the requested false-positive rate. The value must be in the range \((0, 1)\). The recommended range is \([1\mathrm{e}{ -5 },1\mathrm{e}{ -2 }]\). |
Instances
Show BloomFilterAlloc Source # | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> BloomFilterAlloc -> ShowS # show :: BloomFilterAlloc -> String # showList :: [BloomFilterAlloc] -> ShowS # | |
NFData BloomFilterAlloc Source # | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: BloomFilterAlloc -> () # | |
Eq BloomFilterAlloc Source # | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: BloomFilterAlloc -> BloomFilterAlloc -> Bool # (/=) :: BloomFilterAlloc -> BloomFilterAlloc -> Bool # | |
DecodeVersioned BloomFilterAlloc Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s BloomFilterAlloc Source # | |
Encode BloomFilterAlloc Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: BloomFilterAlloc -> Encoding Source # |
data FencePointerIndexType Source #
The fence-pointer index type supports two types of indexes. The ordinary indexes are designed to work with any key. The compact indexes are optimised for the case where the keys in the database are uniformly distributed, e.g., when the keys are hashes.
For a detailed discussion of the fence-pointer index types, see Fine-tuning: Fence-Pointer Index Type.
Constructors
OrdinaryIndex | Ordinary indexes are designed to work with any key. When using an ordinary index, the |
CompactIndex | Compact indexes are designed for the case where the keys in the database are uniformly distributed, e.g., when the keys are hashes. When using a compact index, the
Use |
Instances
Show FencePointerIndexType Source # | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> FencePointerIndexType -> ShowS # show :: FencePointerIndexType -> String # showList :: [FencePointerIndexType] -> ShowS # | |
NFData FencePointerIndexType Source # | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: FencePointerIndexType -> () # | |
Eq FencePointerIndexType Source # | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: FencePointerIndexType -> FencePointerIndexType -> Bool # (/=) :: FencePointerIndexType -> FencePointerIndexType -> Bool # | |
DecodeVersioned FencePointerIndexType Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s FencePointerIndexType Source # | |
Encode FencePointerIndexType Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods |
data DiskCachePolicy Source #
The disk cache policy determines if lookup operations use the OS page cache. Caching may improve the performance of lookups if database access follows certain patterns.
For a detailed discussion of the disk cache policy, see Fine-tuning: Disk Cache Policy.
Constructors
DiskCacheAll | Cache all data in the table. Use this policy if the database's access pattern has either good spatial locality or both good spatial and temporal locality. |
DiskCacheLevelOneTo !Int | Cache the data in the freshest levels, up to the given level number. Use this policy if the database's access pattern only has good temporal locality. |
DiskCacheNone | Do not cache any table data. Use this policy if the database's access pattern does not have good spatial or temporal locality. For instance, if the access pattern is uniformly random. |
Instances
Show DiskCachePolicy Source # | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> DiskCachePolicy -> ShowS # show :: DiskCachePolicy -> String # showList :: [DiskCachePolicy] -> ShowS # | |
NFData DiskCachePolicy Source # | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: DiskCachePolicy -> () # | |
Eq DiskCachePolicy Source # | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: DiskCachePolicy -> DiskCachePolicy -> Bool # (/=) :: DiskCachePolicy -> DiskCachePolicy -> Bool # | |
DecodeVersioned DiskCachePolicy Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s DiskCachePolicy Source # | |
Encode DiskCachePolicy Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: DiskCachePolicy -> Encoding Source # |
Table Configuration Overrides
data OverrideDiskCachePolicy Source #
The OverrideDiskCachePolicy
can be used to override the DiskCachePolicy
when opening a table from a snapshot.
Instances
Show OverrideDiskCachePolicy Source # | |
Defined in Database.LSMTree.Internal.Config.Override Methods showsPrec :: Int -> OverrideDiskCachePolicy -> ShowS # show :: OverrideDiskCachePolicy -> String # showList :: [OverrideDiskCachePolicy] -> ShowS # | |
Eq OverrideDiskCachePolicy Source # | |
Defined in Database.LSMTree.Internal.Config.Override Methods (==) :: OverrideDiskCachePolicy -> OverrideDiskCachePolicy -> Bool # (/=) :: OverrideDiskCachePolicy -> OverrideDiskCachePolicy -> Bool # |
Ranges
A range of keys.
Constructors
FromToExcluding k k |
|
FromToIncluding k k |
|
Union Credit and Debt
newtype UnionCredits Source #
Union credits are passed to supplyUnionCredits
to perform some amount of computation to incrementally complete a union.
Constructors
UnionCredits Int |
Instances
Union debt represents the amount of computation that must be performed before an incremental union is completed. This includes the cost of completing incremental unions that were part of a union's input.
Warning: The UnionDebt
returned by remainingUnionDebt
is an upper bound on the remaining union debt, not the exact union debt.
Instances
Num UnionDebt Source # | |
Defined in Database.LSMTree.Internal.Unsafe | |
Show UnionDebt Source # | |
Eq UnionDebt Source # | |
Ord UnionDebt Source # | |
Defined in Database.LSMTree.Internal.Unsafe |
Key/Value Serialisation
Raw bytes.
This type imposes no alignment constraint and provides no guarantee of whether the memory is pinned or unpinned.
Instances
IsString RawBytes Source # |
Warning: |
Defined in Database.LSMTree.Internal.RawBytes Methods fromString :: String -> RawBytes # | |
Monoid RawBytes Source # |
|
Semigroup RawBytes Source # |
|
IsList RawBytes Source # |
|
Show RawBytes Source # | |
NFData RawBytes Source # | |
Defined in Database.LSMTree.Internal.RawBytes | |
Eq RawBytes Source # | |
Ord RawBytes Source # | This instance uses lexicographic ordering. |
Defined in Database.LSMTree.Internal.RawBytes | |
Hashable RawBytes Source # | |
Defined in Database.LSMTree.Internal.RawBytes | |
type Item RawBytes Source # | |
Defined in Database.LSMTree.Internal.RawBytes |
class SerialiseKey k where Source #
Serialisation of keys.
Instances should satisfy the following laws:
- Identity: deserialiseKey (serialiseKey x) == x
- Identity up to slicing: deserialiseKey (packSlice prefix (serialiseKey x) suffix) == x
Instances
class SerialiseKey k => SerialiseKeyOrderPreserving k Source #
Order-preserving serialisation of keys.
Table data is sorted by serialised keys. Range lookups and cursors return entries in this order. If serialisation does not preserve the ordering of unserialised keys, then range lookups and cursors return entries out of order.
If the SerialiseKey
instance for a type preserves the ordering,
then it can safely be given an instance of SerialiseKeyOrderPreserving
.
These should satisfy the following law:
- Order-preserving: x `compare` y == serialiseKey x `compare` serialiseKey y
Serialised keys are lexicographically ordered. To satisfy the Order-preserving law, keys should be serialised into a big-endian format.
Instances
class SerialiseValue v where Source #
Serialisation of values and blobs.
Instances should satisfy the following laws:
- Identity: deserialiseValue (serialiseValue x) == x
- Identity up to slicing: deserialiseValue (packSlice prefix (serialiseValue x) suffix) == x
Instances
Key/Value Serialisation Property Tests
serialiseKeyIdentity :: (Eq k, SerialiseKey k) => k -> Bool Source #
Test the Identity law for the SerialiseKey
class
serialiseKeyIdentityUpToSlicing :: (Eq k, SerialiseKey k) => RawBytes -> k -> RawBytes -> Bool Source #
Test the Identity up to slicing law for the SerialiseKey
class
serialiseKeyPreservesOrdering :: (Ord k, SerialiseKey k) => k -> k -> Bool Source #
Test the Order-preserving law for the SerialiseKeyOrderPreserving
class
serialiseKeyMinimalSize :: SerialiseKey k => k -> Bool Source #
Test the Minimal size law for the CompactIndex
option.
serialiseValueIdentity :: (Eq v, SerialiseValue v) => v -> Bool Source #
Test the Identity law for the SerialiseValue
class
serialiseValueIdentityUpToSlicing :: (Eq v, SerialiseValue v) => RawBytes -> v -> RawBytes -> Bool Source #
Test the Identity up to slicing law for the SerialiseValue
class
packSlice :: RawBytes -> RawBytes -> RawBytes -> RawBytes Source #
packSlice prefix x suffix makes x into a slice with prefix bytes on the left and suffix bytes on the right.
Monoidal Value Resolution
class ResolveValue v where Source #
An instance of ResolveValue v specifies how to merge values when using monoidal upsert.
The class has two functions.
The function resolve
acts on values, whereas the function resolveSerialised
acts on serialised values.
Each function has a default implementation in terms of the other and serialisation/deserialisation.
The user is encouraged to implement resolveSerialised
.
Instances should satisfy the following:
- Compatibility
- The functions
resolve
andresolveSerialised
should be compatible:serialiseValue (resolve v1 v2) == resolveSerialised (Proxy @v) (serialiseValue v1) (serialiseValue v2)
- Associativity
- The function
resolve
should be associative:serialiseValue (v1 `resolve` (v2 `resolve` v3)) == serialiseValue ((v1 `resolve` v2) `resolve` v3)
- Valid Output
- The function
resolveSerialised
should only return deserialisable values:deserialiseValue (resolveSerialised (Proxy @v) rb1 rb2) `deepseq` True
Minimal complete definition
Methods
resolve :: v -> v -> v Source #
Combine two values.
default resolve :: SerialiseValue v => v -> v -> v Source #
resolveSerialised :: Proxy v -> RawBytes -> RawBytes -> RawBytes Source #
Combine two serialised values.
The user may assume that the input bytes are valid and can be deserialised using deserialiseValue
.
The inputs are only ever produced by serialiseValue
and resolveSerialised
.
default resolveSerialised :: SerialiseValue v => Proxy v -> RawBytes -> RawBytes -> RawBytes Source #
Instances
(Num v, SerialiseValue v) => ResolveValue (Sum v) Source # | |
ResolveValue (ResolveAsFirst v) Source # | |
Defined in Database.LSMTree.Internal.Types Methods resolve :: ResolveAsFirst v -> ResolveAsFirst v -> ResolveAsFirst v Source # resolveSerialised :: Proxy (ResolveAsFirst v) -> RawBytes -> RawBytes -> RawBytes Source # | |
(SerialiseValue v, Semigroup v) => ResolveValue (ResolveViaSemigroup v) Source # | |
Defined in Database.LSMTree.Internal.Types Methods resolve :: ResolveViaSemigroup v -> ResolveViaSemigroup v -> ResolveViaSemigroup v Source # resolveSerialised :: Proxy (ResolveViaSemigroup v) -> RawBytes -> RawBytes -> RawBytes Source # |
newtype ResolveViaSemigroup v Source #
Wrapper that provides an instance of ResolveValue
via the Semigroup
instance of the underlying type.
resolve (ResolveViaSemigroup v1) (ResolveViaSemigroup v2) = ResolveViaSemigroup (v1 <> v2)
Constructors
ResolveViaSemigroup v |
Instances
newtype ResolveAsFirst v Source #
Wrapper that provides an instance of ResolveValue
such that upsert
behaves as insert
.
The name ResolveAsFirst
is in reference to the wrapper First
from Data.Semigroup.
resolve = const
Constructors
ResolveAsFirst | |
Fields
|
Instances
Show v => Show (ResolveAsFirst v) Source # | |
Defined in Database.LSMTree.Internal.Types Methods showsPrec :: Int -> ResolveAsFirst v -> ShowS # show :: ResolveAsFirst v -> String # showList :: [ResolveAsFirst v] -> ShowS # | |
Eq v => Eq (ResolveAsFirst v) Source # | |
Defined in Database.LSMTree.Internal.Types Methods (==) :: ResolveAsFirst v -> ResolveAsFirst v -> Bool # (/=) :: ResolveAsFirst v -> ResolveAsFirst v -> Bool # | |
SerialiseValue v => SerialiseValue (ResolveAsFirst v) Source # | |
Defined in Database.LSMTree.Internal.Types Methods serialiseValue :: ResolveAsFirst v -> RawBytes Source # deserialiseValue :: RawBytes -> ResolveAsFirst v Source # | |
ResolveValue (ResolveAsFirst v) Source # | |
Defined in Database.LSMTree.Internal.Types Methods resolve :: ResolveAsFirst v -> ResolveAsFirst v -> ResolveAsFirst v Source # resolveSerialised :: Proxy (ResolveAsFirst v) -> RawBytes -> RawBytes -> RawBytes Source # |
Monoidal Value Resolution Property Tests
resolveCompatibility :: (SerialiseValue v, ResolveValue v) => v -> v -> Bool Source #
Test the Compatibility law for the ResolveValue
class.
resolveValidOutput :: (SerialiseValue v, ResolveValue v, NFData v) => v -> v -> Bool Source #
Test the Valid Output law for the ResolveValue
class.
resolveAssociativity :: (SerialiseValue v, ResolveValue v) => v -> v -> v -> Bool Source #
Test the Associativity law for the ResolveValue
class.
Errors
data SessionDirDoesNotExistError Source #
The session directory does not exist.
Constructors
ErrSessionDirDoesNotExist !FsErrorPath |
Instances
Exception SessionDirDoesNotExistError Source # | |
Show SessionDirDoesNotExistError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionDirDoesNotExistError -> ShowS # show :: SessionDirDoesNotExistError -> String # showList :: [SessionDirDoesNotExistError] -> ShowS # | |
Eq SessionDirDoesNotExistError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionDirDoesNotExistError -> SessionDirDoesNotExistError -> Bool # (/=) :: SessionDirDoesNotExistError -> SessionDirDoesNotExistError -> Bool # |
data SessionDirLockedError Source #
The session directory is locked by another active session.
Constructors
ErrSessionDirLocked !FsErrorPath |
Instances
Exception SessionDirLockedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe | |
Show SessionDirLockedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionDirLockedError -> ShowS # show :: SessionDirLockedError -> String # showList :: [SessionDirLockedError] -> ShowS # | |
Eq SessionDirLockedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionDirLockedError -> SessionDirLockedError -> Bool # (/=) :: SessionDirLockedError -> SessionDirLockedError -> Bool # |
data SessionDirCorruptedError Source #
The session directory is corrupted, e.g., it misses required files or contains unexpected files.
Constructors
ErrSessionDirCorrupted !FsErrorPath |
Instances
Exception SessionDirCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe | |
Show SessionDirCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionDirCorruptedError -> ShowS # show :: SessionDirCorruptedError -> String # showList :: [SessionDirCorruptedError] -> ShowS # | |
Eq SessionDirCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionDirCorruptedError -> SessionDirCorruptedError -> Bool # (/=) :: SessionDirCorruptedError -> SessionDirCorruptedError -> Bool # |
data SessionClosedError Source #
The session is closed.
Constructors
ErrSessionClosed |
Instances
Exception SessionClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: SessionClosedError -> SomeException # fromException :: SomeException -> Maybe SessionClosedError # | |
Show SessionClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionClosedError -> ShowS # show :: SessionClosedError -> String # showList :: [SessionClosedError] -> ShowS # | |
Eq SessionClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionClosedError -> SessionClosedError -> Bool # (/=) :: SessionClosedError -> SessionClosedError -> Bool # |
data TableClosedError Source #
The table is closed.
Constructors
ErrTableClosed |
Instances
Exception TableClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: TableClosedError -> SomeException # | |
Show TableClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> TableClosedError -> ShowS # show :: TableClosedError -> String # showList :: [TableClosedError] -> ShowS # | |
Eq TableClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: TableClosedError -> TableClosedError -> Bool # (/=) :: TableClosedError -> TableClosedError -> Bool # |
data TableCorruptedError Source #
The table data is corrupted.
Constructors
ErrLookupByteCountDiscrepancy | |
Instances
Exception TableCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Lookup Methods toException :: TableCorruptedError -> SomeException # fromException :: SomeException -> Maybe TableCorruptedError # | |
Show TableCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Lookup Methods showsPrec :: Int -> TableCorruptedError -> ShowS # show :: TableCorruptedError -> String # showList :: [TableCorruptedError] -> ShowS # | |
Eq TableCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Lookup Methods (==) :: TableCorruptedError -> TableCorruptedError -> Bool # (/=) :: TableCorruptedError -> TableCorruptedError -> Bool # |
data TableTooLargeError Source #
The table contains a run that has more than \(2^{40}\) physical entries.
Constructors
ErrTableTooLarge |
Instances
Exception TableTooLargeError Source # | |
Defined in Database.LSMTree.Internal.MergingRun Methods toException :: TableTooLargeError -> SomeException # fromException :: SomeException -> Maybe TableTooLargeError # | |
Show TableTooLargeError Source # | |
Defined in Database.LSMTree.Internal.MergingRun Methods showsPrec :: Int -> TableTooLargeError -> ShowS # show :: TableTooLargeError -> String # showList :: [TableTooLargeError] -> ShowS # | |
Eq TableTooLargeError Source # | |
Defined in Database.LSMTree.Internal.MergingRun Methods (==) :: TableTooLargeError -> TableTooLargeError -> Bool # (/=) :: TableTooLargeError -> TableTooLargeError -> Bool # |
data TableUnionNotCompatibleError Source #
A table union was constructed with two tables that are not compatible.
Constructors
ErrTableUnionHandleTypeMismatch | |
ErrTableUnionSessionMismatch | |
Fields
|
Instances
data SnapshotExistsError Source #
The named snapshot already exists.
Constructors
ErrSnapshotExists !SnapshotName |
Instances
Exception SnapshotExistsError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: SnapshotExistsError -> SomeException # fromException :: SomeException -> Maybe SnapshotExistsError # | |
Show SnapshotExistsError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotExistsError -> ShowS # show :: SnapshotExistsError -> String # showList :: [SnapshotExistsError] -> ShowS # | |
Eq SnapshotExistsError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotExistsError -> SnapshotExistsError -> Bool # (/=) :: SnapshotExistsError -> SnapshotExistsError -> Bool # |
data SnapshotDoesNotExistError Source #
The named snapshot does not exist.
Constructors
ErrSnapshotDoesNotExist !SnapshotName |
Instances
Exception SnapshotDoesNotExistError Source # | |
Defined in Database.LSMTree.Internal.Unsafe | |
Show SnapshotDoesNotExistError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotDoesNotExistError -> ShowS # show :: SnapshotDoesNotExistError -> String # showList :: [SnapshotDoesNotExistError] -> ShowS # | |
Eq SnapshotDoesNotExistError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotDoesNotExistError -> SnapshotDoesNotExistError -> Bool # (/=) :: SnapshotDoesNotExistError -> SnapshotDoesNotExistError -> Bool # |
data SnapshotCorruptedError Source #
The named snapshot is corrupted.
Constructors
ErrSnapshotCorrupted !SnapshotName !FileCorruptedError |
Instances
Exception SnapshotCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe | |
Show SnapshotCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotCorruptedError -> ShowS # show :: SnapshotCorruptedError -> String # showList :: [SnapshotCorruptedError] -> ShowS # | |
Eq SnapshotCorruptedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotCorruptedError -> SnapshotCorruptedError -> Bool # (/=) :: SnapshotCorruptedError -> SnapshotCorruptedError -> Bool # |
data SnapshotNotCompatibleError Source #
The named snapshot is not compatible with the expected type.
Constructors
ErrSnapshotWrongLabel | The named snapshot is not compatible with the given label. |
Fields
|
Instances
Exception SnapshotNotCompatibleError Source # | |
Show SnapshotNotCompatibleError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotNotCompatibleError -> ShowS # show :: SnapshotNotCompatibleError -> String # showList :: [SnapshotNotCompatibleError] -> ShowS # | |
Eq SnapshotNotCompatibleError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotNotCompatibleError -> SnapshotNotCompatibleError -> Bool # (/=) :: SnapshotNotCompatibleError -> SnapshotNotCompatibleError -> Bool # |
data BlobRefInvalidError Source #
A BlobRef
used with retrieveBlobs
was invalid.
BlobRef
s are obtained from lookups in a Table
, but they may be
invalidated by subsequent changes in that Table
. In general the
reliable way to retrieve blobs is not to change the Table
before
retrieving the blobs. To allow later retrievals, duplicate the table
before making modifications and keep the table open until all blob
retrievals are complete.
Constructors
ErrBlobRefInvalid !Int | The |
Instances
Exception BlobRefInvalidError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: BlobRefInvalidError -> SomeException # fromException :: SomeException -> Maybe BlobRefInvalidError # | |
Show BlobRefInvalidError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> BlobRefInvalidError -> ShowS # show :: BlobRefInvalidError -> String # showList :: [BlobRefInvalidError] -> ShowS # | |
Eq BlobRefInvalidError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: BlobRefInvalidError -> BlobRefInvalidError -> Bool # (/=) :: BlobRefInvalidError -> BlobRefInvalidError -> Bool # |
data CursorClosedError Source #
The cursor is closed.
Constructors
ErrCursorClosed |
Instances
Exception CursorClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: CursorClosedError -> SomeException # | |
Show CursorClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> CursorClosedError -> ShowS # show :: CursorClosedError -> String # showList :: [CursorClosedError] -> ShowS # | |
Eq CursorClosedError Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: CursorClosedError -> CursorClosedError -> Bool # (/=) :: CursorClosedError -> CursorClosedError -> Bool # |
data InvalidSnapshotNameError Source #
Constructors
ErrInvalidSnapshotName !String |
Instances
Exception InvalidSnapshotNameError Source # | |
Defined in Database.LSMTree.Internal.Paths | |
Show InvalidSnapshotNameError Source # | |
Defined in Database.LSMTree.Internal.Paths Methods showsPrec :: Int -> InvalidSnapshotNameError -> ShowS # show :: InvalidSnapshotNameError -> String # showList :: [InvalidSnapshotNameError] -> ShowS # | |
Eq InvalidSnapshotNameError Source # | |
Defined in Database.LSMTree.Internal.Paths Methods (==) :: InvalidSnapshotNameError -> InvalidSnapshotNameError -> Bool # (/=) :: InvalidSnapshotNameError -> InvalidSnapshotNameError -> Bool # |
Traces
data Tracer (m :: Type -> Type) a Source #
This type describes some effect in m
which depends upon some value of
type a
, for which the output value is not of interest (only the effects).
The motivating use case is to describe tracing, logging, monitoring, and similar features, in which the programmer wishes to provide some values to some other program which will do some real world side effect, such as writing to a log file or bumping a counter in some monitoring system.
The actual implementation of such a program will probably work on rather
large, domain-agnostic types like Text
, ByteString
, JSON values for
structured logs, etc.
But the call sites which ultimately invoke these implementations will deal with smaller, domain-specific types that concisely describe events, metrics, debug information, etc.
This difference is reconciled by the Contravariant
instance for Tracer
.
contramap
is used to change the input type of
a tracer. This allows for a more general tracer to be used where a more
specific one is expected.
Intuitively: if you can map your domain-specific type Event
to a Text
representation, then any Tracer m Text
can stand in where a
Tracer m Event
is required.
eventToText :: Event -> Text traceTextToLogFile :: Tracer m Text traceEventToLogFile :: Tracer m Event traceEventToLogFile = contramap eventToText traceTextToLogFile
Effectful tracers that actually do interesting stuff can be defined using emit, and composed via contramap.
The nullTracer can be used as a stand-in for any tracer, doing no side-effects and producing no interesting value.
To deal with branching, the arrow interface on the underlying Tracer should be used. Arrow notation can be helpful here.
For example, a common pattern is to trace only some variants of a sum type.
data Event = This Int | That Bool

traceOnlyThat :: Tracer m Int -> Tracer m Bool
traceOnlyThat tr = Tracer $ proc event -> do
  case event of
    This i -> use tr -< i
    That _ -> squelch -< ()
The key point of using the arrow representation we have here is that this tracer will not necessarily need to force event: if the input tracer tr does not force its value, then event will not be forced. To elaborate, suppose tr is nullTracer. Then this expression becomes
classify (This i) = Left i classify (That _) = Right () traceOnlyThat tr = Tracer $ Pure classify >>> (squelch ||| squelch) >>> Pure (either id id) = Tracer $ Pure classify >>> Pure (either (const (Left ())) (const (Right ()))) >>> Pure (either id id) = Tracer $ Pure (classify >>> either (const (Left ())) (const (Right ())) >>> either id id)
So that when this tracer is run by traceWith
we get
traceWith (traceOnlyThat tr) x
  = traceWith (Pure _)
  = pure ()
It is _essential_ that the computation of the tracing effects cannot itself
have side-effects, as this would ruin the ability to short-circuit when
it is known that no tracing will be done: the side-effects of a branch
could change the outcome of another branch. This would fly in the face of
a crucial design goal: you can leave your tracer calls in the program so
they do not bitrot, but can also make them zero runtime cost by substituting
nullTracer
appropriately.
data LSMTreeTrace Source #
Constructors
Instances
Show LSMTreeTrace Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> LSMTreeTrace -> ShowS # show :: LSMTreeTrace -> String # showList :: [LSMTreeTrace] -> ShowS # |
data TableTrace Source #
Constructors
TraceCreateTable TableConfig | A table is created with the specified config. This message is traced in addition to messages like |
TraceCloseTable | |
TraceLookups Int | |
TraceRangeLookup (Range SerialisedKey) | |
TraceUpdates Int | |
TraceSnapshot SnapshotName | |
TraceDuplicate | |
TraceRemainingUnionDebt | |
TraceSupplyUnionCredits UnionCredits |
Instances
Show TableTrace Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> TableTrace -> ShowS # show :: TableTrace -> String # showList :: [TableTrace] -> ShowS # |
data CursorTrace Source #
Constructors
TraceCreateCursor TableId | |
TraceCloseCursor | |
TraceReadCursor Int |
Instances
Show CursorTrace Source # | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> CursorTrace -> ShowS # show :: CursorTrace -> String # showList :: [CursorTrace] -> ShowS # |
data MergeTrace Source #
Constructors
TraceFlushWriteBuffer | |
Fields
| |
TraceAddLevel | |
TraceAddRun | |
TraceNewMerge | |
Fields
| |
TraceNewMergeSingleRun | |
Fields
| |
TraceCompletedMerge | |
Fields
| |
TraceExpectCompletedMerge RunNumber | This is traced at the latest point the merge could complete. |
Instances
Show MergeTrace Source # | |
Defined in Database.LSMTree.Internal.MergeSchedule Methods showsPrec :: Int -> MergeTrace -> ShowS # show :: MergeTrace -> String # showList :: [MergeTrace] -> ShowS # |
Instances
Enum LevelNo Source # | |
Show LevelNo Source # | |
NFData LevelNo Source # | |
Defined in Database.LSMTree.Internal.Config | |
Eq LevelNo Source # | |
Ord LevelNo Source # | |
Defined in Database.LSMTree.Internal.Config |
newtype NumEntries Source #
A count of entries, for example the number of entries in a run.
This number is limited by the machine's word size. On 32-bit systems, the maximum number we can represent is 2^31, which is roughly 2 billion. This should be a sufficiently large limit that we never reach it in practice. By extension, for 64-bit and higher-bit systems this limit is also sufficiently large.
Constructors
NumEntries Int |
Instances
Instances
Show RunNumber Source # | |
NFData RunNumber Source # | |
Defined in Database.LSMTree.Internal.RunNumber | |
Eq RunNumber Source # | |
Ord RunNumber Source # | |
Defined in Database.LSMTree.Internal.RunNumber | |
DecodeVersioned RunNumber Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s RunNumber Source # | |
Encode RunNumber Source # | |
data MergePolicyForLevel Source #
Constructors
LevelTiering | |
LevelLevelling |
Instances
Show MergePolicyForLevel Source # | |
Defined in Database.LSMTree.Internal.IncomingRun Methods showsPrec :: Int -> MergePolicyForLevel -> ShowS # show :: MergePolicyForLevel -> String # showList :: [MergePolicyForLevel] -> ShowS # | |
NFData MergePolicyForLevel Source # | |
Defined in Database.LSMTree.Internal.IncomingRun Methods rnf :: MergePolicyForLevel -> () # | |
Eq MergePolicyForLevel Source # | |
Defined in Database.LSMTree.Internal.IncomingRun Methods (==) :: MergePolicyForLevel -> MergePolicyForLevel -> Bool # (/=) :: MergePolicyForLevel -> MergePolicyForLevel -> Bool # | |
DecodeVersioned MergePolicyForLevel Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s MergePolicyForLevel Source # | |
Encode MergePolicyForLevel Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: MergePolicyForLevel -> Encoding Source # |
data LevelMergeType Source #
Different types of merges created as part of a regular (non-union) level. A last-level merge behaves differently from a mid-level merge: last-level merges can actually remove delete operations, whereas mid-level merges must preserve them. This is orthogonal to the MergePolicy.
Constructors
MergeMidLevel | |
MergeLastLevel |
Instances
Show LevelMergeType Source # | |
Defined in Database.LSMTree.Internal.Merge Methods showsPrec :: Int -> LevelMergeType -> ShowS # show :: LevelMergeType -> String # showList :: [LevelMergeType] -> ShowS # | |
NFData LevelMergeType Source # | |
Defined in Database.LSMTree.Internal.Merge Methods rnf :: LevelMergeType -> () # | |
Eq LevelMergeType Source # | |
Defined in Database.LSMTree.Internal.Merge Methods (==) :: LevelMergeType -> LevelMergeType -> Bool # (/=) :: LevelMergeType -> LevelMergeType -> Bool # | |
IsMergeType LevelMergeType Source # | |
Defined in Database.LSMTree.Internal.Merge | |
DecodeVersioned LevelMergeType Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s LevelMergeType Source # | |
Encode LevelMergeType Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: LevelMergeType -> Encoding Source # |
Constructors
RunParams | |
Fields |
Instances
Show RunParams Source # | |
NFData RunParams Source # | |
Defined in Database.LSMTree.Internal.RunBuilder | |
Eq RunParams Source # | |
DecodeVersioned RunParams Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s RunParams Source # | |
Encode RunParams Source # | |
data RunDataCaching Source #
Should this run cache key/ops data in memory?
Constructors
CacheRunData | |
NoCacheRunData |
Instances
Show RunDataCaching Source # | |
Defined in Database.LSMTree.Internal.RunBuilder Methods showsPrec :: Int -> RunDataCaching -> ShowS # show :: RunDataCaching -> String # showList :: [RunDataCaching] -> ShowS # | |
NFData RunDataCaching Source # | |
Defined in Database.LSMTree.Internal.RunBuilder Methods rnf :: RunDataCaching -> () # | |
Eq RunDataCaching Source # | |
Defined in Database.LSMTree.Internal.RunBuilder Methods (==) :: RunDataCaching -> RunDataCaching -> Bool # (/=) :: RunDataCaching -> RunDataCaching -> Bool # | |
DecodeVersioned RunDataCaching Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s RunDataCaching Source # | |
Encode RunDataCaching Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods encode :: RunDataCaching -> Encoding Source # |
The type of supported index types.
Instances
Show IndexType Source # | |
NFData IndexType Source # | |
Defined in Database.LSMTree.Internal.Index | |
Eq IndexType Source # | |
DecodeVersioned IndexType Source # | |
Defined in Database.LSMTree.Internal.Snapshot.Codec Methods decodeVersioned :: SnapshotVersion -> Decoder s IndexType Source # | |
Encode IndexType Source # | |