| Copyright | (c) 2023-2025 Cardano Development Foundation | 
|---|---|
| License | Apache-2.0 | 
| Stability | experimental | 
| Portability | portable | 
| Safe Haskell | Safe-Inferred | 
| Language | GHC2021 | 
Database.LSMTree
Description
Synopsis
- type IOLike m = (MonadAsync m, MonadMVar m, MonadThrow m, MonadThrow (STM m), MonadCatch m, MonadMask m, PrimMonad m, MonadST m, MonadEvaluate m)
 - data Session (m :: Type -> Type)
 - withOpenSession :: forall m h a. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> Salt -> FsPath -> (Session m -> m a) -> m a
 - withOpenSessionIO :: Tracer IO LSMTreeTrace -> FilePath -> (Session IO -> IO a) -> IO a
 - withNewSession :: forall m h a. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> Salt -> FsPath -> (Session m -> m a) -> m a
 - withRestoreSession :: forall m h a. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> FsPath -> (Session m -> m a) -> m a
 - openSession :: forall m h. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> Salt -> FsPath -> m (Session m)
 - newSession :: forall m h. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> Salt -> FsPath -> m (Session m)
 - restoreSession :: forall m h. (IOLike m, Typeable h) => Tracer m LSMTreeTrace -> HasFS m h -> HasBlockIO m h -> FsPath -> m (Session m)
 - closeSession :: forall m. IOLike m => Session m -> m ()
 - data Table (m :: Type -> Type) k v b
 - withTable :: forall m k v b a. IOLike m => Session m -> (Table m k v b -> m a) -> m a
 - withTableWith :: forall m k v b a. IOLike m => TableConfig -> Session m -> (Table m k v b -> m a) -> m a
 - newTable :: forall m k v b. IOLike m => Session m -> m (Table m k v b)
 - newTableWith :: forall m k v b. IOLike m => TableConfig -> Session m -> m (Table m k v b)
 - closeTable :: forall m k v b. IOLike m => Table m k v b -> m ()
 - member :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m Bool
 - members :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector Bool)
 - data LookupResult v b
- = NotFound
 - | Found !v
 - | FoundWithBlob !v !b
 
 - getValue :: LookupResult v b -> Maybe v
 - getBlob :: LookupResult v b -> Maybe b
 - lookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m (LookupResult v (BlobRef m b))
 - lookups :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector (LookupResult v (BlobRef m b)))
 - data Entry k v b
- = Entry !k !v
 - | EntryWithBlob !k !v !b
 
 - getEntryKey :: Entry k v b -> k
 - getEntryValue :: Entry k v b -> v
 - getEntryBlob :: Entry k v b -> Maybe b
 - rangeLookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Range k -> m (Vector (Entry k v (BlobRef m b)))
 - insert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> Maybe b -> m ()
 - inserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v, Maybe b) -> m ()
 - upsert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> m ()
 - upserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v) -> m ()
 - delete :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> m ()
 - deletes :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector k -> m ()
 - data Update v b
 - update :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> Update v b -> m ()
 - updates :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, Update v b) -> m ()
 - withDuplicate :: forall m k v b a. IOLike m => Table m k v b -> (Table m k v b -> m a) -> m a
 - duplicate :: forall m k v b. IOLike m => Table m k v b -> m (Table m k v b)
 - withUnion :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a
 - withUnions :: forall m k v b a. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a
 - union :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> m (Table m k v b)
 - unions :: forall m k v b. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> m (Table m k v b)
 - withIncrementalUnion :: forall m k v b a. IOLike m => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a
 - withIncrementalUnions :: forall m k v b a. IOLike m => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a
 - incrementalUnion :: forall m k v b. IOLike m => Table m k v b -> Table m k v b -> m (Table m k v b)
 - incrementalUnions :: forall m k v b. IOLike m => NonEmpty (Table m k v b) -> m (Table m k v b)
 - remainingUnionDebt :: forall m k v b. IOLike m => Table m k v b -> m UnionDebt
 - supplyUnionCredits :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> UnionCredits -> m UnionCredits
 - data BlobRef (m :: Type -> Type) b
 - retrieveBlob :: forall m b. (IOLike m, SerialiseValue b) => Session m -> BlobRef m b -> m b
 - retrieveBlobs :: forall m b. (IOLike m, SerialiseValue b) => Session m -> Vector (BlobRef m b) -> m (Vector b)
 - data Cursor (m :: Type -> Type) k v b
 - withCursor :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> (Cursor m k v b -> m a) -> m a
 - withCursorAtOffset :: forall m k v b a. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> (Cursor m k v b -> m a) -> m a
 - newCursor :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> m (Cursor m k v b)
 - newCursorAtOffset :: forall m k v b. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> m (Cursor m k v b)
 - closeCursor :: forall m k v b. IOLike m => Cursor m k v b -> m ()
 - next :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Cursor m k v b -> m (Maybe (Entry k v (BlobRef m b)))
 - take :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b)))
 - takeWhile :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> (k -> Bool) -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b)))
 - saveSnapshot :: forall m k v b. IOLike m => SnapshotName -> SnapshotLabel -> Table m k v b -> m ()
 - withTableFromSnapshot :: forall m k v b a. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a
 - withTableFromSnapshotWith :: forall m k v b a. IOLike m => ResolveValue v => TableConfigOverride -> Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a
 - openTableFromSnapshot :: forall m k v b. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b)
 - openTableFromSnapshotWith :: forall m k v b. IOLike m => ResolveValue v => TableConfigOverride -> Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b)
 - doesSnapshotExist :: forall m. IOLike m => Session m -> SnapshotName -> m Bool
 - deleteSnapshot :: forall m. IOLike m => Session m -> SnapshotName -> m ()
 - listSnapshots :: forall m. IOLike m => Session m -> m [SnapshotName]
 - data SnapshotName
 - isValidSnapshotName :: String -> Bool
 - toSnapshotName :: String -> SnapshotName
 - newtype SnapshotLabel = SnapshotLabel Text
 - type Salt = Word64
 - data TableConfig
 - defaultTableConfig :: TableConfig
 - data MergePolicy = LazyLevelling
 - data MergeSchedule
 - data SizeRatio = Four
 - data WriteBufferAlloc = AllocNumEntries !Int
 - data BloomFilterAlloc
 - data FencePointerIndexType
 - data DiskCachePolicy
 - newtype MergeBatchSize = MergeBatchSize Int
 - data TableConfigOverride = TableConfigOverride {}
 - noTableConfigOverride :: TableConfigOverride
 - data Range k
- = FromToExcluding k k
 - | FromToIncluding k k
 
 - newtype UnionCredits = UnionCredits Int
 - newtype UnionDebt = UnionDebt Int
 - newtype RawBytes = RawBytes (Vector Word8)
 - class SerialiseKey k where
- serialiseKey :: k -> RawBytes
 - deserialiseKey :: RawBytes -> k
 
 - class SerialiseKey k => SerialiseKeyOrderPreserving k
 - class SerialiseValue v where
- serialiseValue :: v -> RawBytes
 - deserialiseValue :: RawBytes -> v
 
 - serialiseKeyIdentity :: (Eq k, SerialiseKey k) => k -> Bool
 - serialiseKeyIdentityUpToSlicing :: (Eq k, SerialiseKey k) => RawBytes -> k -> RawBytes -> Bool
 - serialiseKeyPreservesOrdering :: (Ord k, SerialiseKey k) => k -> k -> Bool
 - serialiseValueIdentity :: (Eq v, SerialiseValue v) => v -> Bool
 - serialiseValueIdentityUpToSlicing :: (Eq v, SerialiseValue v) => RawBytes -> v -> RawBytes -> Bool
 - packSlice :: RawBytes -> RawBytes -> RawBytes -> RawBytes
 - class ResolveValue v where
 - newtype ResolveViaSemigroup v = ResolveViaSemigroup v
 - newtype ResolveAsFirst v = ResolveAsFirst {
- unResolveAsFirst :: v
 
 - resolveCompatibility :: (SerialiseValue v, ResolveValue v) => v -> v -> Bool
 - resolveValidOutput :: (SerialiseValue v, ResolveValue v, NFData v) => v -> v -> Bool
 - resolveAssociativity :: (SerialiseValue v, ResolveValue v) => v -> v -> v -> Bool
 - data SessionDirDoesNotExistError = ErrSessionDirDoesNotExist !FsErrorPath
 - data SessionDirLockedError = ErrSessionDirLocked !FsErrorPath
 - data SessionDirCorruptedError = ErrSessionDirCorrupted !Text !FsErrorPath
 - data SessionClosedError = ErrSessionClosed
 - data TableClosedError = ErrTableClosed
 - data TableCorruptedError = ErrLookupByteCountDiscrepancy !ByteCount !ByteCount
 - data TableTooLargeError = ErrTableTooLarge
 - data TableUnionNotCompatibleError
 - data SnapshotExistsError = ErrSnapshotExists !SnapshotName
 - data SnapshotDoesNotExistError = ErrSnapshotDoesNotExist !SnapshotName
 - data SnapshotCorruptedError = ErrSnapshotCorrupted !SnapshotName !FileCorruptedError
 - data SnapshotNotCompatibleError = ErrSnapshotWrongLabel !SnapshotName !SnapshotLabel !SnapshotLabel
 - data BlobRefInvalidError = ErrBlobRefInvalid !Int
 - data CursorClosedError = ErrCursorClosed
 - data InvalidSnapshotNameError = ErrInvalidSnapshotName !String
 - data Tracer (m :: Type -> Type) a
 - data LSMTreeTrace
 - data SessionTrace
 - data TableTrace
- = TraceCreatedTable SessionId TableConfig
 - | TraceNewTable TableConfig
 - | TraceCloseTable
 - | TraceClosedTable
 - | TraceLookups Int
 - | TraceRangeLookup (Range SerialisedKey)
 - | TraceUpdates Int
 - | TraceUpdated Int
 - | TraceOpenTableFromSnapshot SnapshotName TableConfigOverride
 - | TraceSaveSnapshot SnapshotName
 - | TraceSavedSnapshot SnapshotName
 - | TraceDuplicate TableId
 - | TraceIncrementalUnions (NonEmpty TableId)
 - | TraceRemainingUnionDebt
 - | TraceSupplyUnionCredits UnionCredits
 - | TraceSuppliedUnionCredits UnionCredits UnionCredits
 - | TraceMerge (AtLevel MergeTrace)
 
 - data CursorTrace
 - newtype SessionId = SessionId FsPath
 - newtype TableId = TableId Int
 - newtype CursorId = CursorId Int
 
Usage Notes
This section focuses on the differences between the full API as defined in this module and the simple API as defined in Database.LSMTree.Simple. It assumes that the reader is familiar with the Usage Notes for the simple API, which discuss crucial topics such as Resource Management, Concurrency, ACID properties, and Sharing.
Real and Simulated IO
type IOLike m = (MonadAsync m, MonadMVar m, MonadThrow m, MonadThrow (STM m), MonadCatch m, MonadMask m, PrimMonad m, MonadST m, MonadEvaluate m) Source #
Examples
The examples in this module use the preamble described in this section, which does three things:
- It imports this module qualified, as intended, as well as any other relevant modules.
 - It defines types for keys, values, and BLOBs.
 - It defines a helper function that runs examples with access to an open session and fresh table.
 
Importing Database.LSMTree
This module is intended to be imported qualified, to avoid name clashes with Prelude functions.
>>>import Database.LSMTree (BlobRef, Cursor, RawBytes, ResolveValue (..), SerialiseKey (..), SerialiseValue (..), Session, Table)>>>import qualified Database.LSMTree as LSMT
Defining key, value, and BLOB types
The examples in this module use the types Key, Value, and Blob for keys, values and BLOBs.
>>>import Data.ByteString (ByteString)>>>import Data.ByteString.Short (ShortByteString)>>>import Data.Proxy (Proxy)>>>import Data.String (IsString)>>>import Data.Word (Word64)
The type Key is a newtype wrapper around Word64.
The required instance of SerialiseKey is derived by GeneralisedNewtypeDeriving from the preexisting instance for Word64.
>>>:{newtype Key = Key Word64 deriving stock (Eq, Ord, Show) deriving newtype (Num, SerialiseKey) :}
The type Value is a newtype wrapper around ShortByteString.
The required instance of SerialiseValue is derived by GeneralisedNewtypeDeriving from the preexisting instance for ShortByteString.
>>>:{newtype Value = Value ShortByteString deriving stock (Eq, Show) deriving newtype (IsString, SerialiseValue) :}
The type Value has an instance of ResolveValue which appends the new value to the old value separated by a space.
It is sufficient to define either resolve or resolveSerialised,
as each can be defined in terms of the other and serialiseValue/deserialiseValue.
For optimal performance, you should always define resolveSerialised manually.
NOTE:
The first argument of resolve and resolveSerialised is the new value and the second argument is the old value.
>>>:{instance ResolveValue Value where resolve :: Value -> Value -> Value resolve (Value new) (Value old) = Value (new <> " " <> old) resolveSerialised :: Proxy Value -> RawBytes -> RawBytes -> RawBytes resolveSerialised _ new old = new <> " " <> old :}
The type Blob is a newtype wrapper around ByteString.
The required instance of SerialiseValue is derived by GeneralisedNewtypeDeriving from the preexisting instance for ByteString.
>>>:{newtype Blob = Blob ByteString deriving stock (Eq, Show) deriving newtype (IsString, SerialiseValue) :}
Defining a helper function to run examples
The examples in this module are wrapped in a call to runExample,
which creates a temporary session directory and
runs the example with access to an open Session and a fresh Table.
>>>import Control.Exception (bracket, bracket_)>>>import Data.Foldable (traverse_)>>>import qualified System.Directory as Dir>>>import System.FilePath ((</>))>>>import System.Process (getCurrentPid)>>>:{runExample :: (Session IO -> Table IO Key Value Blob -> IO a) -> IO a runExample action = do tmpDir <- Dir.getTemporaryDirectory pid <- getCurrentPid let sessionDir = tmpDir </> "doctest_Database_LSMTree" </> show pid let createSessionDir = Dir.createDirectoryIfMissing True sessionDir let removeSessionDir = Dir.removeDirectoryRecursive sessionDir bracket_ createSessionDir removeSessionDir $ do LSMT.withOpenSessionIO mempty sessionDir $ \session -> do LSMT.withTable session $ \table -> action session table :}
Sessions
data Session (m :: Type -> Type) Source #
A session stores context that is shared by multiple tables.
Each session is associated with one session directory where the files containing table data are stored. Each session locks its session directory. There can only be one active session for each session directory at a time. If a database must be accessed from multiple parts of a program, one session should be opened and shared between those parts of the program. Session directories cannot be shared between OS processes.
A session may contain multiple tables, which may each have a different configuration and different key, value, and BLOB types. Furthermore, sessions may contain both simple and full-featured tables.
Arguments
| :: forall m h a. (IOLike m, Typeable h) | |
| => Tracer m LSMTreeTrace | |
| -> HasFS m h | |
| -> HasBlockIO m h | |
| -> Salt | The session salt.  | 
| -> FsPath | The session directory.  | 
| -> (Session m -> m a) | |
| -> m a | 
Run an action with access to a session opened from a session directory.
If the session directory is empty, a new session is created using the given salt. Otherwise, the session directory is restored as an existing session ignoring the given salt.
If there are no open tables or cursors when the session terminates, then the disk I/O complexity of this operation is \(O(1)\).
Otherwise, closeTable is called for each open table and closeCursor is called for each open cursor.
Consequently, the worst-case disk I/O complexity of this operation depends on the merge policy of the open tables in the session.
The following assumes all tables in the session have the same merge policy:
LazyLevelling- \(O(o \: T \log_T \frac{n}{B})\).
 
The variable \(o\) refers to the number of open tables and cursors in the session.
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of openSession and closeSession.
Throws the following exceptions:
SessionDirDoesNotExistError- If the session directory does not exist.
 SessionDirLockedError- If the session directory is locked by another process.
 SessionDirCorruptedError- If the session directory is malformed.
 
withOpenSessionIO :: Tracer IO LSMTreeTrace -> FilePath -> (Session IO -> IO a) -> IO a Source #
Variant of withOpenSession that is specialised to IO using the real filesystem.
Arguments
| :: forall m h a. (IOLike m, Typeable h) | |
| => Tracer m LSMTreeTrace | |
| -> HasFS m h | |
| -> HasBlockIO m h | |
| -> Salt | The session salt.  | 
| -> FsPath | The session directory.  | 
| -> (Session m -> m a) | |
| -> m a | 
Run an action with access to a new session.
The session directory must be empty.
If there are no open tables or cursors when the session terminates, then the disk I/O complexity of this operation is \(O(1)\).
Otherwise, closeTable is called for each open table and closeCursor is called for each open cursor.
Consequently, the worst-case disk I/O complexity of this operation depends on the merge policy of the open tables in the session.
The following assumes all tables in the session have the same merge policy:
LazyLevelling- \(O(o \: T \log_T \frac{n}{B})\).
 
The variable \(o\) refers to the number of open tables and cursors in the session.
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of newSession and closeSession.
Throws the following exceptions:
SessionDirDoesNotExistError- If the session directory does not exist.
 SessionDirLockedError- If the session directory is locked by another process.
 SessionDirCorruptedError- If the session directory is malformed.
 
Arguments
| :: forall m h a. (IOLike m, Typeable h) | |
| => Tracer m LSMTreeTrace | |
| -> HasFS m h | |
| -> HasBlockIO m h | |
| -> FsPath | The session directory.  | 
| -> (Session m -> m a) | |
| -> m a | 
Run an action with access to a restored session.
The session directory must be non-empty: a session must have previously been opened and closed in this directory.
If there are no open tables or cursors when the session terminates, then the disk I/O complexity of this operation is \(O(1)\).
Otherwise, closeTable is called for each open table and closeCursor is called for each open cursor.
Consequently, the worst-case disk I/O complexity of this operation depends on the merge policy of the open tables in the session.
The following assumes all tables in the session have the same merge policy:
LazyLevelling- \(O(o \: T \log_T \frac{n}{B})\).
 
The variable \(o\) refers to the number of open tables and cursors in the session.
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of restoreSession and closeSession.
Throws the following exceptions:
SessionDirDoesNotExistError- If the session directory does not exist.
 SessionDirLockedError- If the session directory is locked by another process.
 SessionDirCorruptedError- If the session directory is malformed.
 
Arguments
| :: forall m h. (IOLike m, Typeable h) | |
| => Tracer m LSMTreeTrace | |
| -> HasFS m h | |
| -> HasBlockIO m h | |
| -> Salt | The session salt.  | 
| -> FsPath | The session directory.  | 
| -> m (Session m) | 
Open a session from a session directory.
If the session directory is empty, a new session is created using the given salt. Otherwise, the session directory is restored as an existing session ignoring the given salt.
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: Sessions hold open resources and must be closed using closeSession.
Throws the following exceptions:
SessionDirDoesNotExistError- If the session directory does not exist.
 SessionDirLockedError- If the session directory is locked by another process.
 SessionDirCorruptedError- If the session directory is malformed.
 
Arguments
| :: forall m h. (IOLike m, Typeable h) | |
| => Tracer m LSMTreeTrace | |
| -> HasFS m h | |
| -> HasBlockIO m h | |
| -> Salt | The session salt.  | 
| -> FsPath | The session directory.  | 
| -> m (Session m) | 
Create a new session.
The session directory must be empty.
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: Sessions hold open resources and must be closed using closeSession.
Throws the following exceptions:
SessionDirDoesNotExistError- If the session directory does not exist.
 SessionDirLockedError- If the session directory is locked by another process.
 SessionDirCorruptedError- If the session directory is malformed.
 
Arguments
| :: forall m h. (IOLike m, Typeable h) | |
| => Tracer m LSMTreeTrace | |
| -> HasFS m h | |
| -> HasBlockIO m h | |
| -> FsPath | The session directory.  | 
| -> m (Session m) | 
Restore a session from a session directory.
The session directory must be non-empty: a session must have previously been opened (and closed) in this directory.
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: Sessions hold open resources and must be closed using closeSession.
Throws the following exceptions:
SessionDirDoesNotExistError- If the session directory does not exist.
 SessionDirLockedError- If the session directory is locked by another process.
 SessionDirCorruptedError- If the session directory is malformed.
 
closeSession :: forall m. IOLike m => Session m -> m () Source #
Close a session.
If there are no open tables or cursors in the session, then the disk I/O complexity of this operation is \(O(1)\).
Otherwise, closeTable is called for each open table and closeCursor is called for each open cursor.
Consequently, the worst-case disk I/O complexity of this operation depends on the merge policy of the tables in the session.
The following assumes all tables in the session have the same merge policy:
LazyLevelling- \(O(o \: T \log_T \frac{n}{B})\).
 
The variable \(o\) refers to the number of open tables and cursors in the session.
Closing is idempotent, i.e., closing a closed session does nothing. All other operations on a closed session will throw an exception.
Tables
data Table (m :: Type -> Type) k v b Source #
A table is a handle to an individual LSM-tree key/value store with both in-memory and on-disk parts.
Warning: Tables are ephemeral. Once you close a table, its data is lost forever. To persist tables, use snapshots.
withTable :: forall m k v b a. IOLike m => Session m -> (Table m k v b -> m a) -> m a Source #
Run an action with access to an empty table.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of newTable and closeTable.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 
withTableWith :: forall m k v b a. IOLike m => TableConfig -> Session m -> (Table m k v b -> m a) -> m a Source #
Variant of withTable that accepts table configuration.
newTable :: forall m k v b. IOLike m => Session m -> m (Table m k v b) Source #
Create an empty table.
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: Tables hold open resources and must be closed using closeTable.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 
newTableWith :: forall m k v b. IOLike m => TableConfig -> Session m -> m (Table m k v b) Source #
Variant of newTable that accepts table configuration.
closeTable :: forall m k v b. IOLike m => Table m k v b -> m () Source #
Close a table.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
Closing is idempotent, i.e., closing a closed table does nothing. All other operations on a closed table will throw an exception.
Warning: Tables are ephemeral. Once you close a table, its data is lost forever. To persist tables, use snapshots.
Table Lookups
member :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m Bool Source #
Check if the key is a member of the table.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing print =<< LSMT.member table 0 :} True
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
Membership tests can be performed concurrently from multiple Haskell threads.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 TableCorruptedError- If the table data is corrupted.
 
members :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector Bool) Source #
Variant of member for batch membership tests.
The batch of keys corresponds in-order to the batch of results.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(b \: T \log_T \frac{n}{B})\).
 
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
members table keys = traverse (member table) keys
data LookupResult v b Source #
Constructors
| NotFound | |
| Found !v | |
| FoundWithBlob !v !b | 
Instances
getValue :: LookupResult v b -> Maybe v Source #
Get the field of type v from a LookupResult v b, if any.
getBlob :: LookupResult v b -> Maybe b Source #
Get the field of type b from a LookupResult v b, if any.
The following property holds:
isJust (getBlob result) <= isJust (getValue result)
lookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> k -> m (LookupResult v (BlobRef m b)) Source #
Look up the value associated with a key.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing print =<< LSMT.lookup table 0 :} Found (Value "Hello")
If the key is not associated with any value, lookup returns NotFound.
>>>:{runExample $ \session table -> do LSMT.lookup table 0 :} NotFound
If the key has an associated BLOB, the result contains a BlobRef.
The full BLOB can be retrieved by passing that BlobRef to retrieveBlob.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} FoundWithBlob (Value "Hello") (Blob "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
Lookups can be performed concurrently from multiple Haskell threads.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 TableCorruptedError- If the table data is corrupted.
 
lookups :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Vector k -> m (Vector (LookupResult v (BlobRef m b))) Source #
Variant of lookup for batch lookups.
The batch of keys corresponds in-order to the batch of results.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(b \: T \log_T \frac{n}{B})\).
 
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
lookups table keys = traverse (lookup table) keys
Constructors
| Entry !k !v | |
| EntryWithBlob !k !v !b | 
Instances
| Bifunctor (Entry k) Source # | |
| Foldable (Entry k v) Source # | |
Defined in Database.LSMTree Methods fold :: Monoid m => Entry k v m -> m # foldMap :: Monoid m => (a -> m) -> Entry k v a -> m # foldMap' :: Monoid m => (a -> m) -> Entry k v a -> m # foldr :: (a -> b -> b) -> b -> Entry k v a -> b # foldr' :: (a -> b -> b) -> b -> Entry k v a -> b # foldl :: (b -> a -> b) -> b -> Entry k v a -> b # foldl' :: (b -> a -> b) -> b -> Entry k v a -> b # foldr1 :: (a -> a -> a) -> Entry k v a -> a # foldl1 :: (a -> a -> a) -> Entry k v a -> a # toList :: Entry k v a -> [a] # length :: Entry k v a -> Int # elem :: Eq a => a -> Entry k v a -> Bool # maximum :: Ord a => Entry k v a -> a # minimum :: Ord a => Entry k v a -> a #  | |
| Traversable (Entry k v) Source # | |
Defined in Database.LSMTree  | |
| Functor (Entry k v) Source # | |
| (Show k, Show v, Show b) => Show (Entry k v b) Source # | |
| (NFData k, NFData v, NFData b) => NFData (Entry k v b) Source # | |
Defined in Database.LSMTree  | |
| (Eq k, Eq v, Eq b) => Eq (Entry k v b) Source # | |
getEntryKey :: Entry k v b -> k Source #
Get the field of type k from an Entry k v b.
getEntryValue :: Entry k v b -> v Source #
Get the field of type v from an Entry k v b.
rangeLookup :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Table m k v b -> Range k -> m (Vector (Entry k v (BlobRef m b))) Source #
Look up a batch of values associated with keys in the given range.
The worst-case disk I/O complexity of this operation is \(O(T \log_T \frac{n}{B} + \frac{b}{P})\), where the variable \(b\) refers to the length of the output vector.
Range lookups can be performed concurrently from multiple Haskell threads.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 TableCorruptedError- If the table data is corrupted.
 
Table Updates
insert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> Maybe b -> m () Source #
Insert associates the given value and BLOB with the given key in the table.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing print =<< LSMT.lookup table 0 :} Found (Value "Hello")
Insert may optionally associate a BLOB value with the given key.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") print =<< traverse (retrieveBlob session) =<< LSMT.lookup table 0 :} FoundWithBlob (Value "Hello") (Blob "World")
Insert overwrites any value and BLOB previously associated with the given key,
even if the given BLOB is Nothing.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") LSMT.insert table 0 "Goodbye" Nothing print =<< traverse (retrieveBlob session) =<< LSMT.lookup table 0 :} Found (Value "Goodbye")
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{n}{P})\).
 
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
inserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v, Maybe b) -> m () Source #
Variant of insert for batch insertions.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
 
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
inserts table entries = traverse_ (uncurry $ insert table) entries
upsert :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> v -> m () Source #
If the given key is not a member of the table, upsert associates the given value with the given key in the table.
Otherwise, upsert updates the value associated with the given key by combining it with the given value using resolve.
>>>:{runExample $ \session table -> do LSMT.upsert table 0 "Hello" LSMT.upsert table 0 "Goodbye" print =<< LSMT.lookup table 0 :} Found (Value "Goodbye Hello")
Warning: Upsert deletes any BLOB previously associated with the given key.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") LSMT.upsert table 0 "Goodbye" print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} Found (Value "Goodbye Hello")
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{n}{P})\).
 
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
The following property holds in the absence of races:
upsert table k v = do r <- lookup table k let v' = maybe v (resolve v) (getValue r) insert table k v' Nothing
upserts :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, v) -> m () Source #
Variant of upsert for batch upserts.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
 
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
upserts table entries = traverse_ (uncurry $ upsert table) entries
delete :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> m () Source #
Delete a key from the table.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.delete table 0 print =<< LSMT.lookup table 0 :} NotFound
If the key is not a member of the table, the table is left unchanged.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.delete table 1 print =<< LSMT.lookup table 0 :} Found (Value "Hello")
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{n}{P})\).
 
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
deletes :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector k -> m () Source #
Variant of delete for batch deletions.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
 
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
deletes table keys = traverse_ (delete table) keys
update :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> k -> Update v b -> m () Source #
Update generalises insert, delete, and upsert.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(\frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{n}{P})\).
 
The following properties hold:
update table k (Insert v mb) = insert table k v mb
update table k Delete = delete table k
update table k (Upsert v) = upsert table k v
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
updates :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v, SerialiseValue b) => Table m k v b -> Vector (k, Update v b) -> m () Source #
Variant of update for batch updates.
The worst-case disk I/O complexity of this operation depends on the merge policy and the merge schedule of the table:
LazyLevelling/Incremental- \(O(b \: \frac{1}{P} \: \log_T \frac{n}{B})\).
 LazyLevelling/OneShot- \(O(\frac{b}{P} \log_T \frac{b}{B} + \frac{n}{P})\).
 
The variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
updates table entries = traverse_ (uncurry $ update table) entries
Table Duplication
withDuplicate :: forall m k v b a. IOLike m => Table m k v b -> (Table m k v b -> m a) -> m a Source #
Run an action with access to the duplicate of a table.
The duplicate is an independent copy of the given table. Subsequent updates to the original table do not affect the duplicate, and vice versa.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.withDuplicate table $ \table' -> do print =<< LSMT.lookup table' 0 LSMT.insert table' 0 "Goodbye" Nothing print =<< LSMT.lookup table' 0 LSMT.lookup table 0 print =<< LSMT.lookup table 0 :} Found (Value "Hello") Found (Value "Goodbye") Found (Value "Hello")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of duplicate and closeTable.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
duplicate :: forall m k v b. IOLike m => Table m k v b -> m (Table m k v b) Source #
Duplicate a table.
The duplicate is an independent copy of the given table. Subsequent updates to the original table do not affect the duplicate, and vice versa.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing bracket (LSMT.duplicate table) LSMT.closeTable $ \table' -> do print =<< LSMT.lookup table' 0 LSMT.insert table' 0 "Goodbye" Nothing print =<< LSMT.lookup table' 0 LSMT.lookup table 0 print =<< LSMT.lookup table 0 :} Found (Value "Hello") Found (Value "Goodbye") Found (Value "Hello")
The worst-case disk I/O complexity of this operation is \(O(0)\).
Warning: The duplicate must be independently closed using closeTable.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
Table Unions
withUnion :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a Source #
Run an action with access to a table that contains the union of the entries of the given tables.
>>>:{runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing LSMT.withUnion table1 table2 $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of union and closeTable.
Warning: Both input tables must be from the same Session.
Warning: This is a relatively expensive operation that may take some time to complete.
See withIncrementalUnion for an incremental alternative.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 TableUnionNotCompatibleError- If both tables are not from the same 
Session. 
withUnions :: forall m k v b a. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a Source #
Variant of withUnion that takes any number of tables.
union :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> Table m k v b -> m (Table m k v b) Source #
Create a table that contains the union of the entries of the given tables.
If the given key is a member of a single input table, then the same key and value occur in the output table.
Otherwise, the values for duplicate keys are combined using resolve from left to right.
If the resolve function behaves like const, then this computes a left-biased union.
>>>:{runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.union table1 table2) LSMT.closeTable $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
Warning: The new table must be independently closed using closeTable.
Warning: Both input tables must be from the same Session.
Warning: This is a relatively expensive operation that may take some time to complete.
See incrementalUnion for an incremental alternative.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 TableUnionNotCompatibleError- If both tables are not from the same 
Session. 
unions :: forall m k v b. IOLike m => ResolveValue v => NonEmpty (Table m k v b) -> m (Table m k v b) Source #
Variant of union that takes any number of tables.
withIncrementalUnion :: forall m k v b a. IOLike m => Table m k v b -> Table m k v b -> (Table m k v b -> m a) -> m a Source #
Run an action with access to a table that incrementally computes the union of the given tables.
>>>:{runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing LSMT.withIncrementalUnion table1 table2 $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of incrementalUnion and closeTable.
The created table has a union debt which represents the amount of computation that remains. See remainingUnionDebt.
The union debt can be paid off by supplying union credit which performs an amount of computation proportional to the amount of union credit. See supplyUnionCredits.
While a table has unresolved union debt, operations may become more expensive by a factor that scales with the number of unresolved unions.
Warning: Both input tables must be from the same Session.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 TableUnionNotCompatibleError- If both tables are not from the same 
Session. 
withIncrementalUnions :: forall m k v b a. IOLike m => NonEmpty (Table m k v b) -> (Table m k v b -> m a) -> m a Source #
Variant of withIncrementalUnion that takes any number of tables.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B} + b)\).
 
The variable \(b\) refers to the number of input tables.
incrementalUnion :: forall m k v b. IOLike m => Table m k v b -> Table m k v b -> m (Table m k v b) Source #
Create a table that incrementally computes the union of the given tables.
>>>:{runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.incrementalUnion table1 table2) LSMT.closeTable $ \table3 -> do print =<< LSMT.lookup table3 0 print =<< LSMT.lookup table3 1 print =<< LSMT.lookup table1 0 print =<< LSMT.lookup table2 0 :} Found (Value "Hello World") Found (Value "Goodbye") Found (Value "Hello") Found (Value "World")
The worst-case disk I/O complexity of this operation is \(O(1)\).
The created table has a union debt which represents the amount of computation that remains. See remainingUnionDebt.
The union debt can be paid off by supplying union credit which performs an amount of computation proportional to the amount of union credit. See supplyUnionCredits.
While a table has unresolved union debt, operations may become more expensive by a factor that scales with the number of unresolved unions.
Warning: The new table must be independently closed using closeTable.
Warning: Both input tables must be from the same Session.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 TableUnionNotCompatibleError- If both tables are not from the same 
Session. 
incrementalUnions :: forall m k v b. IOLike m => NonEmpty (Table m k v b) -> m (Table m k v b) Source #
Variant of incrementalUnion for any number of tables.
The worst-case disk I/O complexity of this operation is \(O(b)\), where the variable \(b\) refers to the number of input tables.
remainingUnionDebt :: forall m k v b. IOLike m => Table m k v b -> m UnionDebt Source #
Get an upper bound for the amount of remaining union debt. This includes the union debt of any table that was part of the union's input.
>>>:{runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.incrementalUnion table1 table2) LSMT.closeTable $ \table3 -> do putStrLn . ("UnionDebt: "<>) . show =<< LSMT.remainingUnionDebt table3 :} UnionDebt: 4
The worst-case disk I/O complexity of this operation is \(O(0)\).
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
supplyUnionCredits :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> UnionCredits -> m UnionCredits Source #
Supply the given amount of union credits.
This reduces the union debt by at least the number of supplied union credits.
It is therefore advisable to query remainingUnionDebt every once in a while to get an upper bound on the current debt.
This function returns any surplus of union credits as leftover credits when a union has finished. In particular, if the returned number of credits is positive, then the union is finished.
>>>:{runExample $ \session table1 -> do LSMT.insert table1 0 "Hello" Nothing LSMT.withTable session $ \table2 -> do LSMT.insert table2 0 "World" Nothing LSMT.insert table2 1 "Goodbye" Nothing bracket (LSMT.incrementalUnion table1 table2) LSMT.closeTable $ \table3 -> do putStrLn . ("UnionDebt: "<>) . show =<< LSMT.remainingUnionDebt table3 putStrLn . ("Leftovers: "<>) . show =<< LSMT.supplyUnionCredits table3 2 putStrLn . ("UnionDebt: "<>) . show =<< LSMT.remainingUnionDebt table3 putStrLn . ("Leftovers: "<>) . show =<< LSMT.supplyUnionCredits table3 4 :} UnionDebt: 4 Leftovers: 0 UnionDebt: 2 Leftovers: 3
NOTE:
The remainingUnionDebt function gets an upper bound for the amount of remaining union debt.
In the example above, the second call to remainingUnionDebt reports 2, but the union debt is 1.
Therefore, the second call to supplyUnionCredits returns more leftovers than expected.
The worst-case disk I/O complexity of this operation is \(O(\frac{b}{P})\), where the variable \(b\) refers to the amount of credits supplied.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
Blob References
data BlobRef (m :: Type -> Type) b Source #
A blob reference is a reference to an on-disk blob.
Warning: A blob reference is not stable. Any operation that modifies the table, cursor, or session that corresponds to a blob reference may cause it to be invalidated.
The word "blob" in this type comes from the acronym Binary Large Object.
retrieveBlob :: forall m b. (IOLike m, SerialiseValue b) => Session m -> BlobRef m b -> m b Source #
Retrieve the blob value from a blob reference.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" (Just "World") print =<< traverse (LSMT.retrieveBlob session) =<< LSMT.lookup table 0 :} FoundWithBlob (Value "Hello") (Blob "World")
The worst-case disk I/O complexity of this operation is \(O(1)\).
Warning: A blob reference is not stable. Any operation that modifies the table, cursor, or session that corresponds to a blob reference may cause it to be invalidated.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 BlobRefInvalidError- If the blob reference has been invalidated.
 
retrieveBlobs :: forall m b. (IOLike m, SerialiseValue b) => Session m -> Vector (BlobRef m b) -> m (Vector b) Source #
Variant of retrieveBlob for batch retrieval.
The batch of blob references corresponds in-order to the batch of results.
The worst-case disk I/O complexity of this operation is \(O(b)\), where the variable \(b\) refers to the length of the input vector.
The following property holds in the absence of races:
retrieveBlobs session blobRefs = traverse (retrieveBlob session) blobRefs
Cursors
data Cursor (m :: Type -> Type) k v b Source #
A cursor is a stable read-only iterator for a table.
A cursor iterates over the entries in a table following the order of the serialised keys. After the cursor is created, updates to the referenced table do not affect the cursor.
The name of this type references database cursors, not, e.g., text editor cursors.
withCursor :: forall m k v b a. IOLike m => ResolveValue v => Table m k v b -> (Cursor m k v b -> m a) -> m a Source #
Run an action with access to a cursor.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of newCursor and closeCursor.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
withCursorAtOffset :: forall m k v b a. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> (Cursor m k v b -> m a) -> m a Source #
Variant of withCursor that starts at a given key.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursorAtOffset table 1 $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 1) (Value "World")
newCursor :: forall m k v b. IOLike m => ResolveValue v => Table m k v b -> m (Cursor m k v b) Source #
Create a cursor for the given table.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing bracket (LSMT.newCursor table) LSMT.closeCursor $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
Warning: Cursors hold open resources and must be closed using closeCursor.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 
newCursorAtOffset :: forall m k v b. IOLike m => (SerialiseKey k, ResolveValue v) => Table m k v b -> k -> m (Cursor m k v b) Source #
Variant of newCursor that starts at a given key.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing bracket (LSMT.newCursorAtOffset table 1) LSMT.closeCursor $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 1) (Value "World")
closeCursor :: forall m k v b. IOLike m => Cursor m k v b -> m () Source #
Close a cursor.
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
Closing is idempotent, i.e., closing a closed cursor does nothing. All other operations on a closed cursor will throw an exception.
next :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Cursor m k v b -> m (Maybe (Entry k v (BlobRef m b))) Source #
Read the next table entry from the cursor.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do print =<< LSMT.next cursor print =<< LSMT.next cursor print =<< LSMT.next cursor :} Just (Entry (Key 0) (Value "Hello")) Just (Entry (Key 1) (Value "World")) Nothing
The worst-case disk I/O complexity of this operation is \(O(\frac{1}{P})\).
Throws the following exceptions:
SessionClosedError- If the session is closed.
 CursorClosedError- If the cursor is closed.
 
take :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b))) Source #
Read the next batch of table entries from the cursor.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{b}{P})\), where the variable \(b\) refers to the length of the output vector, which is at most equal to the given number. In practice, the length of the output vector is only less than the given number once the cursor reaches the end of the table.
The following property holds:
take n cursor = catMaybes <$> replicateM n (next cursor)
Throws the following exceptions:
SessionClosedError- If the session is closed.
 CursorClosedError- If the cursor is closed.
 
takeWhile :: forall m k v b. IOLike m => (SerialiseKey k, SerialiseValue v, ResolveValue v) => Int -> (k -> Bool) -> Cursor m k v b -> m (Vector (Entry k v (BlobRef m b))) Source #
Variant of take that accepts an additional predicate to determine whether or not to continue reading.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.withCursor table $ \cursor -> do traverse_ print =<< LSMT.takeWhile 32 (<1) cursor :} Entry (Key 0) (Value "Hello")
The worst-case disk I/O complexity of this operation is \(O(\frac{b}{P})\), where the variable \(b\) refers to the length of the output vector, which is at most equal to the given number. In practice, the length of the output vector is only less than the given number when the predicate returns false or the cursor reaches the end of the table.
The following properties hold:
takeWhile n (const True) cursor = take n cursor
takeWhile n (const False) cursor = pure empty
Throws the following exceptions:
SessionClosedError- If the session is closed.
 CursorClosedError- If the cursor is closed.
 
Snapshots
saveSnapshot :: forall m k v b. IOLike m => SnapshotName -> SnapshotLabel -> Table m k v b -> m () Source #
Save the current state of the table to disk as a snapshot under the given
snapshot name. This is the only mechanism that persists a table. Each snapshot
must have a unique name, which may be used to restore the table from that snapshot
using openTableFromSnapshot.
Saving a snapshot does not close the table.
Saving a snapshot is relatively cheap when compared to opening a snapshot. However, it is not so cheap that one should use it after every operation.
>>>:{runExample $ \session table -> do LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table :}
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 SnapshotExistsError- If a snapshot with the same name already exists.
 
withTableFromSnapshot :: forall m k v b a. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a Source #
Run an action with access to a table from a snapshot.
>>>:{runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Open snapshot LSMT.withTableFromSnapshot @_ @Key @Value @Blob session "example" "Key Value Blob" $ \table' -> do LSMT.withCursor table' $ \cursor -> traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
This function is exception-safe for both synchronous and asynchronous exceptions.
It is recommended to use this function instead of openTableFromSnapshot and closeTable.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 SnapshotDoesNotExistError- If no snapshot with the given name exists.
 SnapshotCorruptedError- If the snapshot data is corrupted.
 SnapshotNotCompatibleError- If the snapshot has a different label or is a different table type.
 
withTableFromSnapshotWith :: forall m k v b a. IOLike m => ResolveValue v => TableConfigOverride -> Session m -> SnapshotName -> SnapshotLabel -> (Table m k v b -> m a) -> m a Source #
Variant of withTableFromSnapshot that accepts table configuration overrides.
openTableFromSnapshot :: forall m k v b. IOLike m => ResolveValue v => Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b) Source #
Open a table from a named snapshot.
>>>:{runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Open snapshot bracket (LSMT.openTableFromSnapshot @_ @Key @Value @Blob session "example" "Key Value Blob") LSMT.closeTable $ \table' -> do LSMT.withCursor table' $ \cursor -> traverse_ print =<< LSMT.take 32 cursor :} Entry (Key 0) (Value "Hello") Entry (Key 1) (Value "World")
The worst-case disk I/O complexity of this operation is \(O(\frac{n}{P})\).
Warning: The new table must be independently closed using closeTable.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 TableClosedError- If the table is closed.
 SnapshotDoesNotExistError- If no snapshot with the given name exists.
 SnapshotCorruptedError- If the snapshot data is corrupted.
 SnapshotNotCompatibleError- If the snapshot has a different label or is a different table type.
 
openTableFromSnapshotWith :: forall m k v b. IOLike m => ResolveValue v => TableConfigOverride -> Session m -> SnapshotName -> SnapshotLabel -> m (Table m k v b) Source #
Variant of openTableFromSnapshot that accepts table configuration overrides.
doesSnapshotExist :: forall m. IOLike m => Session m -> SnapshotName -> m Bool Source #
Check if the named snapshot exists.
>>>:{runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Check snapshots print =<< doesSnapshotExist session "example" print =<< doesSnapshotExist session "this_snapshot_does_not_exist" :} True False
The worst-case disk I/O complexity of this operation is \(O(1)\).
Throws the following exceptions:
SessionClosedError- If the session is closed.
 
deleteSnapshot :: forall m. IOLike m => Session m -> SnapshotName -> m () Source #
Delete the named snapshot.
>>>:{runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- Delete snapshot LSMT.deleteSnapshot session "example" :}
The worst-case disk I/O complexity of this operation depends on the merge policy of the table:
LazyLevelling- \(O(T \log_T \frac{n}{B})\).
 
Throws the following exceptions:
SessionClosedError- If the session is closed.
 SnapshotDoesNotExistError- If no snapshot with the given name exists.
 
listSnapshots :: forall m. IOLike m => Session m -> m [SnapshotName] Source #
List the names of all snapshots.
>>>:{runExample $ \session table -> do -- Save snapshot LSMT.insert table 0 "Hello" Nothing LSMT.insert table 1 "World" Nothing LSMT.saveSnapshot "example" "Key Value Blob" table -- List snapshots traverse_ print =<< listSnapshots session :} "example"
The worst-case disk I/O complexity of this operation is \(O(s)\), where \(s\) refers to the number of snapshots in the session.
Throws the following exceptions:
SessionClosedError- If the session is closed.
 
data SnapshotName Source #
Instances
| IsString SnapshotName | The given string must satisfy   | 
Defined in Database.LSMTree.Internal.Paths Methods fromString :: String -> SnapshotName #  | |
| Show SnapshotName | |
Defined in Database.LSMTree.Internal.Paths Methods showsPrec :: Int -> SnapshotName -> ShowS # show :: SnapshotName -> String # showList :: [SnapshotName] -> ShowS #  | |
| Eq SnapshotName | |
Defined in Database.LSMTree.Internal.Paths  | |
| Ord SnapshotName | |
Defined in Database.LSMTree.Internal.Paths Methods compare :: SnapshotName -> SnapshotName -> Ordering # (<) :: SnapshotName -> SnapshotName -> Bool # (<=) :: SnapshotName -> SnapshotName -> Bool # (>) :: SnapshotName -> SnapshotName -> Bool # (>=) :: SnapshotName -> SnapshotName -> Bool # max :: SnapshotName -> SnapshotName -> SnapshotName # min :: SnapshotName -> SnapshotName -> SnapshotName #  | |
isValidSnapshotName :: String -> Bool Source #
Check if a String would be a valid snapshot name.
Snapshot names consist of lowercase characters, digits, dashes -,
 and underscores _, and must be between 1 and 64 characters long.
 >>> isValidSnapshotName "main"
 True
>>>isValidSnapshotName "temporary-123-test_"
True
>>>isValidSnapshotName "UPPER"False>>>isValidSnapshotName "dir/dot.exe"False>>>isValidSnapshotName ".."False>>>isValidSnapshotName "\\"False>>>isValidSnapshotName ""False>>>isValidSnapshotName (replicate 100 'a')False
Snapshot names must be valid directory on both POSIX and Windows. This rules out the following reserved file and directory names on Windows:
>>>isValidSnapshotName "con"False>>>isValidSnapshotName "prn"False>>>isValidSnapshotName "aux"False>>>isValidSnapshotName "nul"False>>>isValidSnapshotName "com1" -- "com2", "com3", etc.False>>>isValidSnapshotName "lpt1" -- "lpt2", "lpt3", etc.False
See, e.g., the VBA docs for the "Bad file name or number" error.
toSnapshotName :: String -> SnapshotName Source #
Create snapshot name.
The given string must satisfy isValidSnapshotName.
Throws the following exceptions:
InvalidSnapshotNameError- If the given string is not a valid snapshot name.
 
newtype SnapshotLabel Source #
Custom, user-supplied text that is included in the metadata.
The main use case for a SnapshotLabel is for the user to supply textual
 information about the key/value/blob type for the table that corresponds to
 the snapshot. This information is used to dynamically check that a snapshot
 is opened at the correct key/value/blob type.
Constructors
| SnapshotLabel Text | 
Instances
| IsString SnapshotLabel | |
Defined in Database.LSMTree.Internal.Snapshot Methods fromString :: String -> SnapshotLabel #  | |
| Show SnapshotLabel | |
Defined in Database.LSMTree.Internal.Snapshot Methods showsPrec :: Int -> SnapshotLabel -> ShowS # show :: SnapshotLabel -> String # showList :: [SnapshotLabel] -> ShowS #  | |
| NFData SnapshotLabel | |
Defined in Database.LSMTree.Internal.Snapshot Methods rnf :: SnapshotLabel -> () #  | |
| Eq SnapshotLabel | |
Defined in Database.LSMTree.Internal.Snapshot Methods (==) :: SnapshotLabel -> SnapshotLabel -> Bool # (/=) :: SnapshotLabel -> SnapshotLabel -> Bool #  | |
Session Configuration
The session salt is used to secure the hash operations in the Bloom filters.
The value of the salt must be kept secret. Otherwise, there are no restrictions on the value.
Table Configuration
data TableConfig Source #
A collection of configuration parameters for tables, which can be used to tune the performance of the table.
To construct a TableConfig, modify the defaultTableConfig, which defines reasonable defaults for all parameters.
For a detailed discussion of fine-tuning the table configuration, see Fine-tuning Table Configuration.
confMergePolicy ::MergePolicy- The merge policy balances the performance of lookups against the performance of updates. Levelling favours lookups. Tiering favours updates. Lazy levelling strikes a middle ground between levelling and tiering, and moderately favours updates. This parameter is explicitly referenced in the documentation of those operations it affects.
 confSizeRatio ::SizeRatio- The size ratio pushes the effects of the merge policy to the extreme. If the size ratio is higher, levelling favours lookups more, and tiering and lazy levelling favour updates more. This parameter is referred to as \(T\) in the disk I/O cost of operations.
 confWriteBufferAlloc ::WriteBufferAlloc- The write buffer capacity balances the performance of lookups and updates against the in-memory size of the database. If the write buffer is larger, it takes up more memory, but lookups and updates are more efficient. This parameter is referred to as \(B\) in the disk I/O cost of operations. Irrespective of this parameter, the write buffer size cannot exceed 4GiB.
 confMergeSchedule ::MergeSchedule- The merge schedule balances the performance of lookups and updates against the consistency of updates. With the one-shot merge schedule, lookups and updates are more efficient overall, but some updates may take much longer than others. With the incremental merge schedule, lookups and updates are less efficient overall, but each update does a similar amount of work. This parameter is explicitly referenced in the documentation of those operations it affects. The merge schedule does not affect the way that table unions are computed. However, any table union must complete all outstanding incremental updates.
 confBloomFilterAlloc ::BloomFilterAlloc- The Bloom filter size balances the performance of lookups against the in-memory size of the database. If the Bloom filters are larger, they take up more memory, but lookup operations are more efficient.
 confFencePointerIndex ::FencePointerIndexType- The fence-pointer index type supports two types of indexes. The ordinary indexes are designed to work with any key. The compact indexes are optimised for the case where the keys in the database are uniformly distributed, e.g., when the keys are hashes.
 confDiskCachePolicy ::DiskCachePolicy- The disk cache policy supports caching lookup operations using the OS page cache. Caching may improve the performance of lookups and updates if database access follows certain patterns.
 confMergeBatchSize ::MergeBatchSize- The merge batch size balances the maximum latency of individual update operations, versus the latency of a sequence of update operations. Bigger batches improves overall performance but some updates will take a lot longer than others. The default is to use a large batch size.
 
Instances
| Show TableConfig | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> TableConfig -> ShowS # show :: TableConfig -> String # showList :: [TableConfig] -> ShowS #  | |
| NFData TableConfig | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: TableConfig -> () #  | |
| Eq TableConfig | |
Defined in Database.LSMTree.Internal.Config  | |
| Override DiskCachePolicy TableConfig | |
Defined in Database.LSMTree.Internal.Config.Override Methods override :: DiskCachePolicy -> TableConfig -> TableConfig  | |
| Override MergeBatchSize TableConfig | |
Defined in Database.LSMTree.Internal.Config.Override Methods override :: MergeBatchSize -> TableConfig -> TableConfig  | |
defaultTableConfig :: TableConfig Source #
The defaultTableConfig defines reasonable defaults for all TableConfig parameters.
>>>confMergePolicy defaultTableConfigLazyLevelling>>>confMergeSchedule defaultTableConfigIncremental>>>confSizeRatio defaultTableConfigFour>>>confWriteBufferAlloc defaultTableConfigAllocNumEntries 20000>>>confBloomFilterAlloc defaultTableConfigAllocRequestFPR 1.0e-3>>>confFencePointerIndex defaultTableConfigOrdinaryIndex>>>confDiskCachePolicy defaultTableConfigDiskCacheAll>>>confMergeBatchSize defaultTableConfigMergeBatchSize 20000
data MergePolicy Source #
The merge policy balances the performance of lookups against the performance of updates. Levelling favours lookups. Tiering favours updates. Lazy levelling strikes a middle ground between levelling and tiering, and moderately favours updates. This parameter is explicitly referenced in the documentation of those operations it affects.
NOTE: This package only supports lazy levelling.
For a detailed discussion of the merge policy, see Fine-tuning: Merge Policy, Size Ratio, and Write Buffer Size.
Constructors
| LazyLevelling | 
Instances
| Show MergePolicy | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> MergePolicy -> ShowS # show :: MergePolicy -> String # showList :: [MergePolicy] -> ShowS #  | |
| NFData MergePolicy | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: MergePolicy -> () #  | |
| Eq MergePolicy | |
Defined in Database.LSMTree.Internal.Config  | |
data MergeSchedule Source #
The merge schedule balances the performance of lookups and updates against the consistency of updates. The merge schedule does not affect the performance of table unions. With the one-shot merge schedule, lookups and updates are more efficient overall, but some updates may take much longer than others. With the incremental merge schedule, lookups and updates are less efficient overall, but each update does a similar amount of work. This parameter is explicitly referenced in the documentation of those operations it affects.
For a detailed discussion of the effect of the merge schedule, see Fine-tuning: Merge Schedule.
Constructors
| OneShot | The   | 
| Incremental | The   | 
Instances
| Show MergeSchedule | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> MergeSchedule -> ShowS # show :: MergeSchedule -> String # showList :: [MergeSchedule] -> ShowS #  | |
| NFData MergeSchedule | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: MergeSchedule -> () #  | |
| Eq MergeSchedule | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: MergeSchedule -> MergeSchedule -> Bool # (/=) :: MergeSchedule -> MergeSchedule -> Bool #  | |
The size ratio pushes the effects of the merge policy to the extreme. If the size ratio is higher, levelling favours lookups more, and tiering and lazy levelling favour updates more. This parameter is referred to as \(T\) in the disk I/O cost of operations.
NOTE: This package only supports a size ratio of four.
For a detailed discussion of the size ratio, see Fine-tuning: Merge Policy, Size Ratio, and Write Buffer Size.
Constructors
| Four | 
data WriteBufferAlloc Source #
The write buffer capacity balances the performance of lookups and updates against the in-memory size of the table. If the write buffer is larger, it takes up more memory, but lookups and updates are more efficient. Irrespective of this parameter, the write buffer size cannot exceed 4GiB.
For a detailed discussion of the size ratio, see Fine-tuning: Merge Policy, Size Ratio, and Write Buffer Size.
Constructors
| AllocNumEntries !Int | Allocate space for the in-memory write buffer to fit the requested number of entries. This parameter is referred to as \(B\) in the disk I/O cost of operations.  | 
Instances
| Show WriteBufferAlloc | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> WriteBufferAlloc -> ShowS # show :: WriteBufferAlloc -> String # showList :: [WriteBufferAlloc] -> ShowS #  | |
| NFData WriteBufferAlloc | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: WriteBufferAlloc -> () #  | |
| Eq WriteBufferAlloc | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: WriteBufferAlloc -> WriteBufferAlloc -> Bool # (/=) :: WriteBufferAlloc -> WriteBufferAlloc -> Bool #  | |
data BloomFilterAlloc Source #
The Bloom filter size balances the performance of lookups against the in-memory size of the table. If the Bloom filters are larger, they take up more memory, but lookup operations are more efficient.
For a detailed discussion of the Bloom filter size, see Fine-tuning: Bloom Filter Size.
Constructors
| AllocFixed !Double | Allocate the requested number of bits per entry in the table. The value must be strictly positive, but fractional values are permitted. The recommended range is \([2, 24]\).  | 
| AllocRequestFPR !Double | Allocate the required number of bits per entry to get the requested false-positive rate. The value must be in the range \((0, 1)\). The recommended range is \([1\mathrm{e}{ -5 },1\mathrm{e}{ -2 }]\).  | 
Instances
| Show BloomFilterAlloc | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> BloomFilterAlloc -> ShowS # show :: BloomFilterAlloc -> String # showList :: [BloomFilterAlloc] -> ShowS #  | |
| NFData BloomFilterAlloc | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: BloomFilterAlloc -> () #  | |
| Eq BloomFilterAlloc | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: BloomFilterAlloc -> BloomFilterAlloc -> Bool # (/=) :: BloomFilterAlloc -> BloomFilterAlloc -> Bool #  | |
data FencePointerIndexType Source #
The fence-pointer index type supports two types of indexes. The ordinary indexes are designed to work with any key. The compact indexes are optimised for the case where the keys in the database are uniformly distributed, e.g., when the keys are hashes.
For a detailed discussion of the fence-pointer index types, see Fine-tuning: Fence-Pointer Index Type.
Constructors
| OrdinaryIndex | Ordinary indexes are designed to work with any key. When using an ordinary index, the   | 
| CompactIndex | Compact indexes are designed for the case where the keys in the database are uniformly distributed, e.g., when the keys are hashes. When using a compact index, some requirements apply to serialised keys: 
  | 
Instances
| Show FencePointerIndexType | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> FencePointerIndexType -> ShowS # show :: FencePointerIndexType -> String # showList :: [FencePointerIndexType] -> ShowS #  | |
| NFData FencePointerIndexType | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: FencePointerIndexType -> () #  | |
| Eq FencePointerIndexType | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: FencePointerIndexType -> FencePointerIndexType -> Bool # (/=) :: FencePointerIndexType -> FencePointerIndexType -> Bool #  | |
data DiskCachePolicy Source #
The disk cache policy determines if lookup operations use the OS page cache. Caching may improve the performance of lookups if database access follows certain patterns.
For a detailed discussion of the disk cache policy, see Fine-tuning: Disk Cache Policy.
Constructors
| DiskCacheAll | Cache all data in the table. Use this policy if the database's access pattern has either good spatial locality or both good spatial and temporal locality.  | 
| DiskCacheLevelOneTo !Int | Cache the data in the freshest  Use this policy if the database's access pattern only has good temporal locality. The variable   | 
| DiskCacheNone | Do not cache any table data. Use this policy if the database's access pattern does not have good spatial or temporal locality. For instance, if the access pattern is uniformly random.  | 
Instances
| Show DiskCachePolicy | |
Defined in Database.LSMTree.Internal.Config Methods showsPrec :: Int -> DiskCachePolicy -> ShowS # show :: DiskCachePolicy -> String # showList :: [DiskCachePolicy] -> ShowS #  | |
| NFData DiskCachePolicy | |
Defined in Database.LSMTree.Internal.Config Methods rnf :: DiskCachePolicy -> () #  | |
| Eq DiskCachePolicy | |
Defined in Database.LSMTree.Internal.Config Methods (==) :: DiskCachePolicy -> DiskCachePolicy -> Bool # (/=) :: DiskCachePolicy -> DiskCachePolicy -> Bool #  | |
| Override DiskCachePolicy TableConfig | |
Defined in Database.LSMTree.Internal.Config.Override Methods override :: DiskCachePolicy -> TableConfig -> TableConfig  | |
| Override DiskCachePolicy SnapshotMetaData | |
Defined in Database.LSMTree.Internal.Config.Override Methods override :: DiskCachePolicy -> SnapshotMetaData -> SnapshotMetaData  | |
| Override DiskCachePolicy (SnapLevels SnapshotRun) | |
Defined in Database.LSMTree.Internal.Config.Override Methods override :: DiskCachePolicy -> SnapLevels SnapshotRun -> SnapLevels SnapshotRun  | |
newtype MergeBatchSize Source #
The merge batch size is a micro-tuning parameter, and in most cases you do not need to think about it and can leave it at its default.
When using the Incremental merge schedule, merging is done in batches. This
is a trade-off: larger batches tends to mean better overall performance but the
downside is that while most updates (inserts, deletes, upserts) are fast, some
are slower (when a batch of merging work has to be done).
If you care most about the maximum latency of updates, then use a small batch
size. If you don't care about latency of individual operations, just the
latency of the overall sequence of operations then use a large batch size. The
default is to use a large batch size, the same size as the write buffer itself.
The minimum batch size is 1. The maximum batch size is the size of the write
buffer confWriteBufferAlloc.
Note that the actual batch size is the minimum of this configuration
parameter and the size of the batch of operations performed (e.g. inserts).
So if you consistently use large batches, you can use a batch size of 1 and
the merge batch size will always be determined by the operation batch size.
A further reason why it may be preferable to use minimal batch sizes is to get good parallel work balance, when using parallelism.
Constructors
| MergeBatchSize Int | 
Instances
Table Configuration Overrides
data TableConfigOverride Source #
The TableConfigOverride can be used to override the TableConfig
when opening a table from a snapshot.
Constructors
| TableConfigOverride | |
Instances
| Show TableConfigOverride | |
Defined in Database.LSMTree.Internal.Config.Override Methods showsPrec :: Int -> TableConfigOverride -> ShowS # show :: TableConfigOverride -> String # showList :: [TableConfigOverride] -> ShowS #  | |
| Eq TableConfigOverride | |
Defined in Database.LSMTree.Internal.Config.Override Methods (==) :: TableConfigOverride -> TableConfigOverride -> Bool # (/=) :: TableConfigOverride -> TableConfigOverride -> Bool #  | |
| Override TableConfigOverride SnapshotMetaData | |
Defined in Database.LSMTree.Internal.Config.Override Methods override :: TableConfigOverride -> SnapshotMetaData -> SnapshotMetaData  | |
noTableConfigOverride :: TableConfigOverride Source #
No override of the TableConfig. You can use this as a default value and
 record update to override some parameters, while being future-proof to new
 parameters, e.g.
noTableConfigOverride { overrideDiskCachePolicy = DiskCacheNone }
Ranges
A range of keys.
Constructors
| FromToExcluding k k | 
  | 
| FromToIncluding k k | 
  | 
Union Credit and Debt
newtype UnionCredits Source #
Union credits are passed to supplyUnionCredits to perform some amount of computation to incrementally complete a union.
Constructors
| UnionCredits Int | 
Instances
Union debt represents the amount of computation that must be performed before an incremental union is completed. This includes the cost of completing incremental unions that were part of a union's input.
Warning: The UnionDebt returned by remainingUnionDebt is an upper bound on the remaining union debt, not the exact union debt.
Instances
| Num UnionDebt | |
Defined in Database.LSMTree.Internal.Unsafe  | |
| Show UnionDebt | |
| Eq UnionDebt | |
| Ord UnionDebt | |
Defined in Database.LSMTree.Internal.Unsafe  | |
Key/Value Serialisation
Raw bytes.
This type imposes no alignment constraint and provides no guarantee of whether the memory is pinned or unpinned.
Instances
| IsString RawBytes | 
 Warning:   | 
Defined in Database.LSMTree.Internal.RawBytes Methods fromString :: String -> RawBytes #  | |
| Monoid RawBytes | 
 
  | 
| Semigroup RawBytes | 
 
  | 
| IsList RawBytes | 
 
  | 
| Show RawBytes | |
| Hashable RawBytes | |
Defined in Database.LSMTree.Internal.RawBytes  | |
| NFData RawBytes | |
Defined in Database.LSMTree.Internal.RawBytes  | |
| Eq RawBytes | |
| Ord RawBytes | This instance uses lexicographic ordering.  | 
Defined in Database.LSMTree.Internal.RawBytes  | |
| type Item RawBytes | |
Defined in Database.LSMTree.Internal.RawBytes  | |
class SerialiseKey k where Source #
Serialisation of keys.
Instances should satisfy the following laws:
- Identity
 deserialiseKey(serialiseKeyx) == x- Identity up to slicing
 deserialiseKey(packSliceprefix (serialiseKeyx) suffix) == x
Instances
class SerialiseKey k => SerialiseKeyOrderPreserving k Source #
Order-preserving serialisation of keys.
Table data is sorted by serialised keys. Range lookups and cursors return entries in this order. If serialisation does not preserve the ordering of unserialised keys, then range lookups and cursors return entries out of order.
If the SerialiseKey instance for a type preserves the ordering,
then it can safely be given an instance of SerialiseKeyOrderPreserving.
These should satisfy the following law:
- Order-preserving
 x `compare` y ==serialiseKeyx `compare`serialiseKeyy
Serialised keys are lexicographically ordered. To satisfy the Order-preserving law, keys should be serialised into a big-endian format.
Instances
class SerialiseValue v where Source #
Serialisation of values and blobs.
Instances should satisfy the following laws:
- Identity
 deserialiseValue(serialiseValuex) == x- Identity up to slicing
 deserialiseValue(packSliceprefix (serialiseValuex) suffix) == x
Instances
Key/Value Serialisation Property Tests
serialiseKeyIdentity :: (Eq k, SerialiseKey k) => k -> Bool Source #
Test the Identity law for the SerialiseKey class
serialiseKeyIdentityUpToSlicing :: (Eq k, SerialiseKey k) => RawBytes -> k -> RawBytes -> Bool Source #
Test the Identity up to slicing law for the SerialiseKey class
serialiseKeyPreservesOrdering :: (Ord k, SerialiseKey k) => k -> k -> Bool Source #
Test the Order-preserving law for the SerialiseKeyOrderPreserving class
serialiseValueIdentity :: (Eq v, SerialiseValue v) => v -> Bool Source #
Test the Identity law for the SerialiseValue class
serialiseValueIdentityUpToSlicing :: (Eq v, SerialiseValue v) => RawBytes -> v -> RawBytes -> Bool Source #
Test the Identity up to slicing law for the SerialiseValue class
packSlice :: RawBytes -> RawBytes -> RawBytes -> RawBytes Source #
packSlice prefix x suffix makes x into a slice with prefix bytes on
 the left and suffix bytes on the right.
Monoidal Value Resolution
class ResolveValue v where Source #
An instance of ResolveValue v specifies how to merge values when using
monoidal upsert.
The class has two functions.
The function resolve acts on values, whereas the function resolveSerialised acts on serialised values.
Each function has a default implementation in terms of the other and serialisation/deserialisation.
The user is encouraged to implement resolveSerialised.
Instances should satisfy the following:
- Compatibility
 - The functions resolve and resolveSerialised should be compatible:
 serialiseValue (resolve v1 v2) == resolveSerialised (Proxy @v) (serialiseValue v1) (serialiseValue v2)
 - Associativity
 - The function resolve should be associative:
 serialiseValue (v1 `resolve` (v2 `resolve` v3)) == serialiseValue ((v1 `resolve` v2) `resolve` v3)
 - Valid Output
 - The function resolveSerialised should only return deserialisable values:
 deserialiseValue (resolveSerialised (Proxy @v) rb1 rb2) `deepseq` True
 
Minimal complete definition
Methods
resolve :: v -> v -> v Source #
Combine two values.
resolveSerialised :: Proxy v -> RawBytes -> RawBytes -> RawBytes Source #
Combine two serialised values.
The user may assume that the input bytes are valid and can be deserialised using deserialiseValue.
  The inputs are only ever produced by serialiseValue and resolveSerialised.
Instances
| ResolveValue Void | |
| (Num v, SerialiseValue v) => ResolveValue (Sum v) | |
| ResolveValue (ResolveAsFirst v) | |
Defined in Database.LSMTree.Internal.Types Methods resolve :: ResolveAsFirst v -> ResolveAsFirst v -> ResolveAsFirst v Source # resolveSerialised :: Proxy (ResolveAsFirst v) -> RawBytes -> RawBytes -> RawBytes Source #  | |
| (SerialiseValue v, Semigroup v) => ResolveValue (ResolveViaSemigroup v) | |
Defined in Database.LSMTree.Internal.Types Methods resolve :: ResolveViaSemigroup v -> ResolveViaSemigroup v -> ResolveViaSemigroup v Source # resolveSerialised :: Proxy (ResolveViaSemigroup v) -> RawBytes -> RawBytes -> RawBytes Source #  | |
newtype ResolveViaSemigroup v Source #
Wrapper that provides an instance of ResolveValue via the Semigroup
instance of the underlying type.
resolve (ResolveViaSemigroup v1) (ResolveViaSemigroup v2) = ResolveViaSemigroup (v1 <> v2)
Constructors
| ResolveViaSemigroup v | 
Instances
| Show v => Show (ResolveViaSemigroup v) | |
Defined in Database.LSMTree.Internal.Types Methods showsPrec :: Int -> ResolveViaSemigroup v -> ShowS # show :: ResolveViaSemigroup v -> String # showList :: [ResolveViaSemigroup v] -> ShowS #  | |
| Eq v => Eq (ResolveViaSemigroup v) | |
Defined in Database.LSMTree.Internal.Types Methods (==) :: ResolveViaSemigroup v -> ResolveViaSemigroup v -> Bool # (/=) :: ResolveViaSemigroup v -> ResolveViaSemigroup v -> Bool #  | |
| SerialiseValue v => SerialiseValue (ResolveViaSemigroup v) | |
Defined in Database.LSMTree.Internal.Types Methods serialiseValue :: ResolveViaSemigroup v -> RawBytes Source # deserialiseValue :: RawBytes -> ResolveViaSemigroup v Source #  | |
| (SerialiseValue v, Semigroup v) => ResolveValue (ResolveViaSemigroup v) | |
Defined in Database.LSMTree.Internal.Types Methods resolve :: ResolveViaSemigroup v -> ResolveViaSemigroup v -> ResolveViaSemigroup v Source # resolveSerialised :: Proxy (ResolveViaSemigroup v) -> RawBytes -> RawBytes -> RawBytes Source #  | |
newtype ResolveAsFirst v Source #
Wrapper that provides an instance of ResolveValue such that upsert behaves as insert.
The name ResolveAsFirst is in reference to the wrapper First from Data.Semigroup.
resolve = const
Constructors
| ResolveAsFirst | |
Fields 
  | |
Instances
| Show v => Show (ResolveAsFirst v) | |
Defined in Database.LSMTree.Internal.Types Methods showsPrec :: Int -> ResolveAsFirst v -> ShowS # show :: ResolveAsFirst v -> String # showList :: [ResolveAsFirst v] -> ShowS #  | |
| Eq v => Eq (ResolveAsFirst v) | |
Defined in Database.LSMTree.Internal.Types Methods (==) :: ResolveAsFirst v -> ResolveAsFirst v -> Bool # (/=) :: ResolveAsFirst v -> ResolveAsFirst v -> Bool #  | |
| SerialiseValue v => SerialiseValue (ResolveAsFirst v) | |
Defined in Database.LSMTree.Internal.Types Methods serialiseValue :: ResolveAsFirst v -> RawBytes Source # deserialiseValue :: RawBytes -> ResolveAsFirst v Source #  | |
| ResolveValue (ResolveAsFirst v) | |
Defined in Database.LSMTree.Internal.Types Methods resolve :: ResolveAsFirst v -> ResolveAsFirst v -> ResolveAsFirst v Source # resolveSerialised :: Proxy (ResolveAsFirst v) -> RawBytes -> RawBytes -> RawBytes Source #  | |
Monoidal Value Resolution Property Tests
resolveCompatibility :: (SerialiseValue v, ResolveValue v) => v -> v -> Bool Source #
Test the Compatibility law for the ResolveValue class.
resolveValidOutput :: (SerialiseValue v, ResolveValue v, NFData v) => v -> v -> Bool Source #
Test the Valid Output law for the ResolveValue class.
resolveAssociativity :: (SerialiseValue v, ResolveValue v) => v -> v -> v -> Bool Source #
Test the Associativity law for the ResolveValue class.
Errors
data SessionDirDoesNotExistError Source #
The session directory does not exist.
Constructors
| ErrSessionDirDoesNotExist !FsErrorPath | 
Instances
| Exception SessionDirDoesNotExistError | |
| Show SessionDirDoesNotExistError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionDirDoesNotExistError -> ShowS # show :: SessionDirDoesNotExistError -> String # showList :: [SessionDirDoesNotExistError] -> ShowS #  | |
| Eq SessionDirDoesNotExistError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionDirDoesNotExistError -> SessionDirDoesNotExistError -> Bool # (/=) :: SessionDirDoesNotExistError -> SessionDirDoesNotExistError -> Bool #  | |
data SessionDirLockedError Source #
The session directory is locked by another active session.
Constructors
| ErrSessionDirLocked !FsErrorPath | 
Instances
| Exception SessionDirLockedError | |
Defined in Database.LSMTree.Internal.Unsafe  | |
| Show SessionDirLockedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionDirLockedError -> ShowS # show :: SessionDirLockedError -> String # showList :: [SessionDirLockedError] -> ShowS #  | |
| Eq SessionDirLockedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionDirLockedError -> SessionDirLockedError -> Bool # (/=) :: SessionDirLockedError -> SessionDirLockedError -> Bool #  | |
data SessionDirCorruptedError Source #
The session directory is corrupted, e.g., it is missing required files or contains unexpected files.
Constructors
| ErrSessionDirCorrupted !Text !FsErrorPath | 
Instances
| Exception SessionDirCorruptedError | |
Defined in Database.LSMTree.Internal.Unsafe  | |
| Show SessionDirCorruptedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionDirCorruptedError -> ShowS # show :: SessionDirCorruptedError -> String # showList :: [SessionDirCorruptedError] -> ShowS #  | |
| Eq SessionDirCorruptedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionDirCorruptedError -> SessionDirCorruptedError -> Bool # (/=) :: SessionDirCorruptedError -> SessionDirCorruptedError -> Bool #  | |
data SessionClosedError Source #
The session is closed.
Constructors
| ErrSessionClosed | 
Instances
| Exception SessionClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: SessionClosedError -> SomeException # fromException :: SomeException -> Maybe SessionClosedError #  | |
| Show SessionClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionClosedError -> ShowS # show :: SessionClosedError -> String # showList :: [SessionClosedError] -> ShowS #  | |
| Eq SessionClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SessionClosedError -> SessionClosedError -> Bool # (/=) :: SessionClosedError -> SessionClosedError -> Bool #  | |
data TableClosedError Source #
The table is closed.
Constructors
| ErrTableClosed | 
Instances
| Exception TableClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: TableClosedError -> SomeException #  | |
| Show TableClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> TableClosedError -> ShowS # show :: TableClosedError -> String # showList :: [TableClosedError] -> ShowS #  | |
| Eq TableClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: TableClosedError -> TableClosedError -> Bool # (/=) :: TableClosedError -> TableClosedError -> Bool #  | |
data TableCorruptedError Source #
The table data is corrupted.
Constructors
| ErrLookupByteCountDiscrepancy | |
Instances
| Exception TableCorruptedError | |
Defined in Database.LSMTree.Internal.Lookup Methods toException :: TableCorruptedError -> SomeException # fromException :: SomeException -> Maybe TableCorruptedError #  | |
| Show TableCorruptedError | |
Defined in Database.LSMTree.Internal.Lookup Methods showsPrec :: Int -> TableCorruptedError -> ShowS # show :: TableCorruptedError -> String # showList :: [TableCorruptedError] -> ShowS #  | |
| Eq TableCorruptedError | |
Defined in Database.LSMTree.Internal.Lookup Methods (==) :: TableCorruptedError -> TableCorruptedError -> Bool # (/=) :: TableCorruptedError -> TableCorruptedError -> Bool #  | |
data TableTooLargeError Source #
The table contains a run that has more than \(2^{40}\) physical entries.
Constructors
| ErrTableTooLarge | 
Instances
| Exception TableTooLargeError | |
Defined in Database.LSMTree.Internal.MergingRun Methods toException :: TableTooLargeError -> SomeException # fromException :: SomeException -> Maybe TableTooLargeError #  | |
| Show TableTooLargeError | |
Defined in Database.LSMTree.Internal.MergingRun Methods showsPrec :: Int -> TableTooLargeError -> ShowS # show :: TableTooLargeError -> String # showList :: [TableTooLargeError] -> ShowS #  | |
| Eq TableTooLargeError | |
Defined in Database.LSMTree.Internal.MergingRun Methods (==) :: TableTooLargeError -> TableTooLargeError -> Bool # (/=) :: TableTooLargeError -> TableTooLargeError -> Bool #  | |
data TableUnionNotCompatibleError Source #
A table union was constructed with two tables that are not compatible.
Constructors
| ErrTableUnionHandleTypeMismatch | |
| ErrTableUnionSessionMismatch | |
Fields 
  | |
Instances
data SnapshotExistsError Source #
The named snapshot already exists.
Constructors
| ErrSnapshotExists !SnapshotName | 
Instances
| Exception SnapshotExistsError | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: SnapshotExistsError -> SomeException # fromException :: SomeException -> Maybe SnapshotExistsError #  | |
| Show SnapshotExistsError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotExistsError -> ShowS # show :: SnapshotExistsError -> String # showList :: [SnapshotExistsError] -> ShowS #  | |
| Eq SnapshotExistsError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotExistsError -> SnapshotExistsError -> Bool # (/=) :: SnapshotExistsError -> SnapshotExistsError -> Bool #  | |
data SnapshotDoesNotExistError Source #
The named snapshot does not exist.
Constructors
| ErrSnapshotDoesNotExist !SnapshotName | 
Instances
| Exception SnapshotDoesNotExistError | |
Defined in Database.LSMTree.Internal.Unsafe  | |
| Show SnapshotDoesNotExistError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotDoesNotExistError -> ShowS # show :: SnapshotDoesNotExistError -> String # showList :: [SnapshotDoesNotExistError] -> ShowS #  | |
| Eq SnapshotDoesNotExistError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotDoesNotExistError -> SnapshotDoesNotExistError -> Bool # (/=) :: SnapshotDoesNotExistError -> SnapshotDoesNotExistError -> Bool #  | |
data SnapshotCorruptedError Source #
The named snapshot is corrupted.
Constructors
| ErrSnapshotCorrupted !SnapshotName !FileCorruptedError | 
Instances
| Exception SnapshotCorruptedError | |
Defined in Database.LSMTree.Internal.Unsafe  | |
| Show SnapshotCorruptedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotCorruptedError -> ShowS # show :: SnapshotCorruptedError -> String # showList :: [SnapshotCorruptedError] -> ShowS #  | |
| Eq SnapshotCorruptedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotCorruptedError -> SnapshotCorruptedError -> Bool # (/=) :: SnapshotCorruptedError -> SnapshotCorruptedError -> Bool #  | |
data SnapshotNotCompatibleError Source #
The named snapshot is not compatible with the expected type.
Constructors
| ErrSnapshotWrongLabel | The named snapshot is not compatible with the given label.  | 
Fields 
  | |
Instances
| Exception SnapshotNotCompatibleError | |
| Show SnapshotNotCompatibleError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SnapshotNotCompatibleError -> ShowS # show :: SnapshotNotCompatibleError -> String # showList :: [SnapshotNotCompatibleError] -> ShowS #  | |
| Eq SnapshotNotCompatibleError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: SnapshotNotCompatibleError -> SnapshotNotCompatibleError -> Bool # (/=) :: SnapshotNotCompatibleError -> SnapshotNotCompatibleError -> Bool #  | |
data BlobRefInvalidError Source #
A BlobRef used with retrieveBlobs was invalid.
BlobRefs are obtained from lookups in a Table, but they may be
invalidated by subsequent changes in that Table. In general the
reliable way to retrieve blobs is not to change the Table before
retrieving the blobs. To allow later retrievals, duplicate the table
before making modifications and keep the table open until all blob
retrievals are complete.
Constructors
| ErrBlobRefInvalid !Int | The   | 
Instances
| Exception BlobRefInvalidError | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: BlobRefInvalidError -> SomeException # fromException :: SomeException -> Maybe BlobRefInvalidError #  | |
| Show BlobRefInvalidError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> BlobRefInvalidError -> ShowS # show :: BlobRefInvalidError -> String # showList :: [BlobRefInvalidError] -> ShowS #  | |
| Eq BlobRefInvalidError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: BlobRefInvalidError -> BlobRefInvalidError -> Bool # (/=) :: BlobRefInvalidError -> BlobRefInvalidError -> Bool #  | |
data CursorClosedError Source #
The cursor is closed.
Constructors
| ErrCursorClosed | 
Instances
| Exception CursorClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods toException :: CursorClosedError -> SomeException #  | |
| Show CursorClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> CursorClosedError -> ShowS # show :: CursorClosedError -> String # showList :: [CursorClosedError] -> ShowS #  | |
| Eq CursorClosedError | |
Defined in Database.LSMTree.Internal.Unsafe Methods (==) :: CursorClosedError -> CursorClosedError -> Bool # (/=) :: CursorClosedError -> CursorClosedError -> Bool #  | |
data InvalidSnapshotNameError Source #
Constructors
| ErrInvalidSnapshotName !String | 
Instances
| Exception InvalidSnapshotNameError | |
Defined in Database.LSMTree.Internal.Paths  | |
| Show InvalidSnapshotNameError | |
Defined in Database.LSMTree.Internal.Paths Methods showsPrec :: Int -> InvalidSnapshotNameError -> ShowS # show :: InvalidSnapshotNameError -> String # showList :: [InvalidSnapshotNameError] -> ShowS #  | |
| Eq InvalidSnapshotNameError | |
Defined in Database.LSMTree.Internal.Paths Methods (==) :: InvalidSnapshotNameError -> InvalidSnapshotNameError -> Bool # (/=) :: InvalidSnapshotNameError -> InvalidSnapshotNameError -> Bool #  | |
Traces
data Tracer (m :: Type -> Type) a Source #
This type describes some effect in m which depends upon some value of
 type a, for which the output value is not of interest (only the effects).
The motivating use case is to describe tracing, logging, monitoring, and similar features, in which the programmer wishes to provide some values to some other program which will do some real world side effect, such as writing to a log file or bumping a counter in some monitoring system.
The actual implementation of such a program will probably work on rather
 large, domain-agnostic types like Text, ByteString, JSON values for
 structured logs, etc.
But the call sites which ultimately invoke these implementations will deal with smaller, domain-specific types that concisely describe events, metrics, debug information, etc.
This difference is reconciled by the Contravariant instance for Tracer.
 contramap is used to change the input type of
 a tracer. This allows for a more general tracer to be used where a more
 specific one is expected.
Intuitively: if you can map your domain-specific type Event to a Text
 representation, then any Tracer m Text can stand in where a
 Tracer m Event is required.
eventToText :: Event -> Text traceTextToLogFile :: Tracer m Text traceEventToLogFile :: Tracer m Event traceEventToLogFile = contramap eventToText traceTextToLogFile
Effectful tracers that actually do interesting stuff can be defined
 using emit, and composed via contramap.
The nullTracer can be used as a stand-in for any tracer, doing no
 side-effects and producing no interesting value.
To deal with branching, the arrow interface on the underlying
 Tracer should be used. Arrow notation can be helpful
 here.
For example, a common pattern is to trace only some variants of a sum type.
data Event = This Int | That Bool
traceOnlyThat :: Tracer m Int -> Tracer m Event
traceOnlyThat tr = Tracer $ proc event -> do
  case event of
    This i -> use tr  -< i
    That _ -> squelch -< ()
The key point of using the arrow representation we have here is that this
 tracer will not necessarily need to force event: if the input tracer tr
 does not force its value, then event will not be forced. To elaborate,
 suppose tr is nullTracer. Then this expression becomes
classify (This i) = Left i classify (That _) = Right () traceOnlyThat tr = Tracer $ Pure classify >>> (squelch ||| squelch) >>> Pure (either id id) = Tracer $ Pure classify >>> Pure (either (const (Left ())) (const (Right ()))) >>> Pure (either id id) = Tracer $ Pure (classify >>> either (const (Left ())) (const (Right ())) >>> either id id)
So that when this tracer is run by traceWith we get
traceWith (traceOnlyThat tr) x = traceWith (Pure _) = pure ()
It is _essential_ that the computation of the tracing effects cannot itself
 have side-effects, as this would ruin the ability to short-circuit when
 it is known that no tracing will be done: the side-effects of a branch
 could change the outcome of another branch. This would fly in the face of
 a crucial design goal: you can leave your tracer calls in the program so
 they do not bitrot, but can also make them zero runtime cost by substituting
 nullTracer appropriately.
data LSMTreeTrace Source #
Constructors
| TraceSession | Trace messages related to sessions.  | 
Fields 
  | |
| TraceTable | Trace messages related to tables.  | 
Fields 
  | |
| TraceCursor | Trace messages related to cursors.  | 
Fields 
  | |
Instances
| Show LSMTreeTrace | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> LSMTreeTrace -> ShowS # show :: LSMTreeTrace -> String # showList :: [LSMTreeTrace] -> ShowS #  | |
| Eq LSMTreeTrace | |
Defined in Database.LSMTree.Internal.Unsafe  | |
data SessionTrace Source #
Trace messages related to sessions.
Constructors
| TraceOpenSession | We are opening a session in the requested session directory. A
   | 
| TraceNewSession | We are creating a new, fresh session. A   | 
| TraceRestoreSession | We are restoring a session from the directory contents. A
   | 
| TraceCreatedSession | A session has been successfully created.  | 
| TraceCloseSession | We are closing the session. A   | 
| TraceClosedSession | Closing the session was successful.  | 
| TraceDeleteSnapshot SnapshotName | We are deleting the snapshot with the given name. A
   | 
| TraceDeletedSnapshot SnapshotName | We have successfully deleted the snapshot with the given name.  | 
| TraceListSnapshots | We are listing snapshots.  | 
| TraceRetrieveBlobs Int | We are retrieving blobs.  | 
Instances
| Show SessionTrace | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> SessionTrace -> ShowS # show :: SessionTrace -> String # showList :: [SessionTrace] -> ShowS #  | |
| Eq SessionTrace | |
Defined in Database.LSMTree.Internal.Unsafe  | |
data TableTrace Source #
Trace messages related to tables.
Constructors
| TraceCreatedTable | A table has been successfully created with the specified config.  | 
Fields 
  | |
| TraceNewTable TableConfig | We are creating a new, fresh table with the specified config. A
   | 
| TraceCloseTable | We are closing the table. A   | 
| TraceClosedTable | Closing the table was successful.  | 
| TraceLookups | We are performing a batch of lookups.  | 
Fields 
  | |
| TraceRangeLookup (Range SerialisedKey) | We are performing a range lookup.  | 
| TraceUpdates | We are performing a batch of updates.  | 
Fields 
  | |
| TraceUpdated | We have successfully performed a batch of updates.  | 
Fields 
  | |
| TraceOpenTableFromSnapshot SnapshotName TableConfigOverride | We are opening a table from a snapshot. A   | 
| TraceSaveSnapshot SnapshotName | We are saving a snapshot with the given name. A   | 
| TraceSavedSnapshot SnapshotName | A snapshot with the given name was saved successfully.  | 
| TraceDuplicate | We are creating a duplicate of a table. A   | 
Fields 
  | |
| TraceIncrementalUnions | We are creating an incremental union of a list of tables. A
   | 
| TraceRemainingUnionDebt | We are querying the remaining union debt.  | 
| TraceSupplyUnionCredits UnionCredits | We are supplying the given number of union credits.  | 
| TraceSuppliedUnionCredits | We have supplied union credits.  | 
Fields 
  | |
| TraceMerge (AtLevel MergeTrace) | |
Instances
| Show TableTrace | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> TableTrace -> ShowS # show :: TableTrace -> String # showList :: [TableTrace] -> ShowS #  | |
| Eq TableTrace | |
Defined in Database.LSMTree.Internal.Unsafe  | |
data CursorTrace Source #
Trace messages related to cursors.
Constructors
| TraceCreatedCursor | A cursor has been successfully created.  | 
Fields 
  | |
| TraceNewCursor | We are creating a new, fresh cursor positioned at the given offset key.
 A   | 
| TraceCloseCursor | We are closing the cursor. A   | 
| TraceClosedCursor | Closing the cursor was successful.  | 
| TraceReadingCursor | We are reading from the cursor. A   | 
Fields 
  | |
| TraceReadCursor | We have successfully read from the cursor.  | 
Instances
| Show CursorTrace | |
Defined in Database.LSMTree.Internal.Unsafe Methods showsPrec :: Int -> CursorTrace -> ShowS # show :: CursorTrace -> String # showList :: [CursorTrace] -> ShowS #  | |
| Eq CursorTrace | |
Defined in Database.LSMTree.Internal.Unsafe  | |
Instances
| Show TableId | |
| NFData TableId | |
Defined in Database.LSMTree.Internal.RunNumber  | |
| Eq TableId | |
| Ord TableId | |
Defined in Database.LSMTree.Internal.RunNumber  | |
Instances
| Show CursorId | |
| NFData CursorId | |
Defined in Database.LSMTree.Internal.RunNumber  | |
| Eq CursorId | |
| Ord CursorId | |
Defined in Database.LSMTree.Internal.RunNumber  | |