diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f97b9c2c..97f83ec75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,19 @@ The `Unreleased` section name is replaced by the expected version of next releas - `Cosmos`: Reorganize Sync log message text, merge with Sync Conflict message [#241](https://github.com/jet/equinox/pull/241) - `Cosmos`: Converge Stored Procedure Impl with `tip-isa-batch` impl from V3 (minor Request Charges cost reduction) [#242](https://github.com/jet/equinox/pull/242) -- target `Microsoft.Azure.Cosmos` v `3.9.0` (instead of `Microsoft.Azure.DocumentDB`[`.Core`] v 2.x) [#144](https://github.com/jet/equinox/pull/144) +- Fork `Equinox.Cosmos` to `Equinox.CosmosStore` + - target `Microsoft.Azure.Cosmos` v `3.9.0` (instead of `Microsoft.Azure.DocumentDB`[`.Core`] v 2.x) [#144](https://github.com/jet/equinox/pull/144) + - Removed [warmup call](https://github.com/Azure/azure-cosmos-dotnet-v3/issues/1436) + - Rename `Equinox.Cosmos` DLL and namespace to `Equinox.CosmosStore` [#243](https://github.com/jet/equinox/pull/243) + - Rename `Equinox.Cosmos.Store` -> `Equinox.CosmosStore.Core` + - `Core` sub-namespace + - Rename `Equinox.Cosmos.Core.Context` -> `Equinox.CosmosStore.Core.EventsContext` + - Change `Equinox.Cosmos.Core.Connection` -> `Equinox.CosmosStore.Core.RetryPolicy` + - Rename `Equinox.Cosmos.Core.Gateway` -> `Equinox.CosmosStore.Core.StoreClient` + - Rename `Equinox.Cosmos.Containers` -> `Equinox.CosmosStore.CosmosStoreConnection` + - Rename `Equinox.Cosmos.Context` -> `Equinox.CosmosStore.CosmosStoreContext` + - Rename `Equinox.Cosmos.Resolver` -> `Equinox.CosmosStore.CosmosStoreCategory` + - Rename `Equinox.Cosmos.Connector` -> `Equinox.CosmosStore.CosmosStoreClientFactory` - target `EventStore.Client` v `20.6` (instead of v `5.0.x`) [#224](https://github.com/jet/equinox/pull/224) - Retarget `netcoreapp2.1` apps to `netcoreapp3.1` with `SystemTextJson` - Retarget Todobackend to `aspnetcore` v `3.1` @@ -24,7 +36,6 @@ The `Unreleased` section name is replaced by the expected version of next releas - Update to `3.1.101` SDK - Remove `module Commands` convention from in examples - Revise semantics of Cart Sample Command handling -- `Cosmos:` Removed [warmup call](https://github.com/Azure/azure-cosmos-dotnet-v3/issues/1436) - Simplify `AsyncCacheCell` [#229](https://github.com/jet/equinox/pull/229) ### Removed diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index f9e88d709..39fcad83e 100755 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -164,23 +164,23 @@ slightly differently: ![Equinox.EventStore/SqlStreamStore c4model.com Code - another process; using snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/EventStoreCode.puml&idx=3&fmt=svg) -# Equinox.Cosmos +# Equinox.CosmosStore -## Container Diagram for `Equinox.Cosmos` +## Container Diagram for `Equinox.CosmosStore` -![Equinox.Cosmos c4model.com Container Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosContainer.puml?fmt=svg) +![Equinox.CosmosStore c4model.com Container Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosContainer.puml?fmt=svg) -## Component Diagram for `Equinox.Cosmos` +## Component Diagram for `Equinox.CosmosStore` -![Equinox.Cosmos 
c4model.com Component Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosComponent.puml?fmt=svg) +![Equinox.CosmosStore c4model.com Component Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosComponent.puml?fmt=svg) -## Code Diagrams for `Equinox.Cosmos` +## Code Diagrams for `Equinox.CosmosStore` This diagram walks through the basic sequence of operations, where: - this node has not yet read this stream (i.e. there's nothing in the Cache) - when we do read it, the Read call returns `404` (with a charge of `1 RU`) -![Equinox.Cosmos c4model.com Code - first Time](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=0&fmt=svg) +![Equinox.CosmosStore c4model.com Code - first Time](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=0&fmt=svg) Next, we extend the scenario to show: - how state held in the Cache influences the Cosmos APIs used @@ -194,12 +194,12 @@ Next, we extend the scenario to show: - when there's conflict and we're giving up (throw `MaxAttemptsExceededException`) -![Equinox.Cosmos c4model.com Code - with cache, snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=1&fmt=svg) +![Equinox.CosmosStore c4model.com Code - with cache, snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=1&fmt=svg) After the write, we circle back to illustrate the effect of the caching when we have correct state (we get a `304 Not Mofified` and pay only `1 RU`) -![Equinox.Cosmos c4model.com Code - next time; same process, i.e. cached](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=2&fmt=svg) +![Equinox.CosmosStore c4model.com Code - next time; same process, i.e. cached](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=2&fmt=svg) In other processes (when a cache is not fully in sync), the sequence runs slightly differently: @@ -208,7 +208,7 @@ slightly differently: suitable snapshot that passes the `isOrigin` predicate is found within the _Tip_ -![Equinox.Cosmos c4model.com Code - another process; using snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=3&fmt=svg) +![Equinox.CosmosStore c4model.com Code - another process; using snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/master/diagrams/CosmosCode.puml&idx=3&fmt=svg) # Glossary @@ -410,12 +410,12 @@ module EventStore = module Cosmos = let accessStrategy = - Equinox.Cosmos.AccessStrategy.Snapshot (Fold.isOrigin, Fold.snapshot) + Equinox.CosmosStore.AccessStrategy.Snapshot (Fold.isOrigin, Fold.snapshot) let create (context, cache) = let cacheStrategy = - Equinox.Cosmos.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + Equinox.CosmosStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
    let resolver =
-        Equinox.Cosmos.Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy)
+        Equinox.CosmosStore.CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy)
    create resolver.Resolve
```
@@ -1558,12 +1558,12 @@ not reading data redundantly, and not feeding back into the oneself (although
having separate roundtrips obviously has implications).

-# `Equinox.Cosmos` CosmosDB Storage Model
+# `Equinox.CosmosStore` CosmosDB Storage Model

-This article provides a walkthrough of how `Equinox.Cosmos` encodes, writes and
+This article provides a walkthrough of how `Equinox.CosmosStore` encodes, writes and
reads records from a stream under its control.

-The code (see [source](src/Equinox.Cosmos/Cosmos.fs#L6)) contains lots of
+The code (see [source](src/Equinox.CosmosStore/CosmosStore.fs#L6)) contains lots of
comments and is intended to be read - this just provides some background.

## Batches
@@ -1648,7 +1648,7 @@ basic elements
- Unfolds - the term `unfold` is based on the well known 'standard' FP function
  of that name, bearing the signature `'state -> 'event seq`. **=> For
-  `Equinox.Cosmos`, one might say `unfold` yields _projection_ s as _event_ s
+  `Equinox.CosmosStore`, one might say `unfold` yields _projection_ s as _event_ s
  to _snapshot_ the _state_ as at that _position_ in the _stream_**.

## Generating and saving `unfold`ed events
@@ -1742,9 +1742,9 @@ based on the events presented.

This covers what the most complete possible implementation of the JS Stored
Procedure (see
-[source](https://github.com/jet/equinox/blob/tip-isa-batch/src/Equinox.Cosmos/Cosmos.fs#L302))
+[source](https://github.com/jet/equinox/blob/tip-isa-batch/src/Equinox.CosmosStore/Cosmos.fs#L302))
does when presented with a batch to be written. (NB The present implementation
-is slightly simplified; see [source](src/Equinox.Cosmos/Cosmos.fs#L303).
+is slightly simplified; see [source](src/Equinox.CosmosStore/CosmosStore.fs#L404).

The `sync` stored procedure takes as input, a document that is almost identical
to the format of the _`Tip`_ batch (in fact, if the stream is found to be
@@ -1780,9 +1780,9 @@ stream). The request includes the following elements:
  retrying in the case of conflict, _without any events being written per state
  change_)

-## Equinox.Cosmos.Core.Events
+## Equinox.CosmosStore.Core.Events

-The `Equinox.Cosmos.Core` namespace provides a lower level API that can be used
+The `Equinox.CosmosStore.Core` namespace provides a lower level API that can be used
to manipulate events stored within a Azure CosmosDb using optimized native
access patterns.
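Before getting into the lower level API, it may help to see the `unfold` notion from the storage model above as code. The `isOrigin`/`snapshot` pair below is a minimal sketch against a hypothetical aggregate; the event and state types are illustrative only and do not correspond to anything in this PR:

```fsharp
// Hypothetical aggregate, purely illustrative of the unfold/snapshot shape
module Fold =
    type Event =
        | Added of string
        | Removed of string
        | Snapshotted of string list // the 'unfold': a projection of State written into the Tip
    type State = string list
    let initial : State = []
    let evolve state = function
        | Added sku -> sku :: state
        | Removed sku -> state |> List.filter ((<>) sku)
        | Snapshotted skus -> skus
    let fold : State -> seq<Event> -> State = Seq.fold evolve
    // lets a read stop as soon as it encounters a snapshot held in the Tip
    let isOrigin = function Snapshotted _ -> true | _ -> false
    // the snapshot function handed to AccessStrategy.Snapshot ('state -> 'event)
    let snapshot (state : State) : Event = Snapshotted state
```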
@@ -1807,7 +1807,7 @@ following key benefits: ```fsharp -open Equinox.Cosmos.Core +open Equinox.CosmosStore.Core // open MyCodecs.Json // example of using specific codec which can yield UTF-8 // byte arrays from a type using `Json.toBytes` via Fleece // or similar @@ -1818,7 +1818,7 @@ type EventData with // Load connection sring from your Key Vault (example here is the CosmosDb // simulator's well known key) -let connectionString: string = +let connectionString : string = "AccountEndpoint=https://localhost:8081;AccountKey=C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==;" // Forward to Log (you can use `Log.Logger` and/or `Log.ForContext` if your app @@ -1829,22 +1829,22 @@ let outputLog = LoggerConfiguration().WriteTo.NLog().CreateLogger() let gatewayLog = outputLog.ForContext(Serilog.Core.Constants.SourceContextPropertyName, "Equinox") -// When starting the app, we connect (once) -let connector : Equinox.Cosmos.Connector = - Connector( +let factory : Equinox.CosmosStore.CosmosStoreClientFactory = + CosmosStoreClientFactory( requestTimeout = TimeSpan.FromSeconds 5., maxRetryAttemptsOnThrottledRequests = 1, maxRetryWaitTimeInSeconds = 3, log = gatewayLog) -let cnx = - connector.Connect("Application.CommandProcessor", Discovery.FromConnectionString connectionString) +let client = + factory.Create("Application.CommandProcessor", Discovery.FromConnectionString connectionString) |> Async.RunSynchronously +let client = factory.Create(Discovery.ConnectionString connectionString) + // If storing in a single collection, one specifies the db and collection -// alternately use the overload that defers the mapping until the stream one is -// writing to becomes clear -let containerMap = Containers("databaseName", "containerName") -let ctx = Context(cnx, containerMap, gatewayLog) +// alternately use the overload that defers the mapping until the stream one is writing to becomes clear +let connection = CosmosStoreConnection(client, "databaseName", "containerName") +let ctx = EventsContext(connection, gatewayLog) // // Write an event @@ -1870,7 +1870,7 @@ An Access Strategy defines any optimizations regarding how one arrives at a State of an Aggregate based on the Events stored in a Stream in a Store. The specifics of an Access Strategy depend on what makes sense for a given -Store, i.e. `Equinox.Cosmos` necessarily has a significantly different set of +Store, i.e. `Equinox.CosmosStore` necessarily has a significantly different set of strategies than `Equinox.EventStore` (although there is an intersection). Access Strategies only affect performance; you should still be able to infer @@ -1881,9 +1881,9 @@ NOTE: its not important to select a strategy until you've actually actually modelled your aggregate, see [what if I change my access strategy](#changing-access-strategy) -## `Equinox.Cosmos.AccessStrategy` +## `Equinox.CosmosStore.AccessStrategy` -TL;DR `Equinox.Cosmos`: (see also: [the storage +TL;DR `Equinox.CosmosStore`: (see also: [the storage model](cosmos-storage-model) for a deep dive, and [glossary, below the table](#access-strategy-glossary) for definition of terms) - keeps all the Events for a Stream in a single [CosmosDB _logical @@ -2073,20 +2073,20 @@ EventStore, and it's Store adapter is the most proven and is pretty feature rich relative to the need of consumers to date. 
Some things remain though: - Provide a low level walking events in F# API akin to - `Equinox.Cosmos.Core.Events`; this would allow consumers to jump from direct + `Equinox.CosmosStore.Core.Events`; this would allow consumers to jump from direct use of `EventStore.ClientAPI` -> `Equinox.EventStore.Core.Events` -> `Equinox.Stream` (with the potential to swap stores once one gets to using `Equinox.Stream`) -- Get conflict handling as efficient and predictable as for `Equinox.Cosmos` +- Get conflict handling as efficient and predictable as for `Equinox.CosmosStore` https://github.com/jet/equinox/issues/28 - provide for snapshots to be stored out of the stream, and loaded in a customizable manner in a manner analogous to - [the proposed comparable `Equinox.Cosmos` facility](https://github.com/jet/equinox/issues/61). + [the proposed comparable `Equinox.CosmosStore` facility](https://github.com/jet/equinox/issues/61). -## Wouldn't it be nice - `Equinox.Cosmos` +## Wouldn't it be nice - `Equinox.CosmosStore` - Enable snapshots to be stored outside of the main collection in - `Equinox.Cosmos` [#61](https://github.com/jet/equinox/issues/61) + `Equinox.CosmosStore` [#61](https://github.com/jet/equinox/issues/61) - Multiple writers support for `u`nfolds (at present a `sync` completely replaces the unfolds in the Tip; this will be extended by having the stored proc maintain the union of the unfolds in play (both for semi-related diff --git a/Equinox.sln b/Equinox.sln index 80d4d4328..1be43e785 100644 --- a/Equinox.sln +++ b/Equinox.sln @@ -47,9 +47,9 @@ Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.MemoryStore.Integra EndProject Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.Tool", "tools\Equinox.Tool\Equinox.Tool.fsproj", "{C8992C1C-6DC5-42CD-A3D7-1C5663433FED}" EndProject -Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.Cosmos", "src\Equinox.Cosmos\Equinox.Cosmos.fsproj", "{54EA6187-9F9F-4D67-B602-163D011E43E6}" +Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.CosmosStore", "src\Equinox.CosmosStore\Equinox.CosmosStore.fsproj", "{54EA6187-9F9F-4D67-B602-163D011E43E6}" EndProject -Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.Cosmos.Integration", "tests\Equinox.Cosmos.Integration\Equinox.Cosmos.Integration.fsproj", "{DE0FEBF0-72DC-4D4A-BBA7-788D875D6B4B}" +Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.CosmosStore.Integration", "tests\Equinox.CosmosStore.Integration\Equinox.CosmosStore.Integration.fsproj", "{DE0FEBF0-72DC-4D4A-BBA7-788D875D6B4B}" EndProject Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TodoBackend", "samples\TodoBackend\TodoBackend.fsproj", "{EC2EC658-3D85-44F3-AD2F-52AFCAFF8871}" EndProject diff --git a/README.md b/README.md index f20d75451..6cef99749 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ Some aspects of the implementation are distilled from [`Jet.com` systems dating - support, (via the [`FsCodec.IEventCodec`](https://github.com/jet/FsCodec#IEventCodec)) for the maintenance of multiple co-existing compaction schemas for a given stream (A 'compaction' event/snapshot isa Event) - compaction events typically do not get deleted (consistent with how EventStore works), although it is safe to do so in concept - NB while this works well, and can deliver excellent performance (especially when allied with the Cache), [it's not a panacea, as noted in this excellent EventStore.org article 
on the topic](https://eventstore.org/docs/event-sourcing-basics/rolling-snapshots/index.html) -- **`Equinox.Cosmos` 'Tip with Unfolds' schema**: (In contrast to `Equinox.EventStore`'s `AccessStrategy.RollingSnapshots`,) when using `Equinox.Cosmos`, optimized command processing is managed via the `Tip`; a document per stream with a well-known identity enabling Syncing the r/w Position via a single point-read by virtue of the fact that the document maintains: +- **`Equinox.CosmosStore` 'Tip with Unfolds' schema**: (In contrast to `Equinox.EventStore`'s `AccessStrategy.RollingSnapshots`,) when using `Equinox.CosmosStore`, optimized command processing is managed via the `Tip`; a document per stream with a well-known identity enabling Syncing the r/w Position via a single point-read by virtue of the fact that the document maintains: a) the present Position of the stream - i.e. the index at which the next events will be appended for a given stream (events and the Tip share a common logical partition key) b) ephemeral (`deflate+base64` compressed) [_unfolds_](DOCUMENTATION.md#Cosmos-Storage-Model) c) (optionally) a holding buffer for events since those unfolded events ([presently removed](https://github.com/jet/equinox/pull/58), but [should return](DOCUMENTATION.md#Roadmap), see [#109](https://github.com/jet/equinox/pull/109)) @@ -49,7 +49,7 @@ Some aspects of the implementation are distilled from [`Jet.com` systems dating - no additional roundtrips to the store needed at either the Load or Sync points in the flow It should be noted that from a querying perspective, the `Tip` shares the same structure as `Batch` documents (a potential future extension would be to carry some events in the `Tip` as [some interim versions of the implementation once did](https://github.com/jet/equinox/pull/58), see also [#109](https://github.com/jet/equinox/pull/109). -- **`Equinox.Cosmos` `RollingState` and `Custom` 'non-event-sourced' modes**: Uses 'Tip with Unfolds' encoding to avoid having to write event documents at all - this enables one to build, reason about and test your aggregates in the normal manner, but inhibit event documents from being generated. This enables one to benefit from the caching and consistency management mechanisms without having to bear the cost of writing and storing the events themselves (and/or dealing with an ever-growing store size). Search for `transmute` or `RollingState` in the `samples` and/or see [the `Checkpoint` Aggregate in Propulsion](https://github.com/jet/propulsion/blob/master/src/Propulsion.EventStore/Checkpoint.fs). One chief use of this mechanism is for tracking Summary Event feeds in [the `dotnet-templates` `summaryConsumer` template](https://github.com/jet/dotnet-templates/tree/master/propulsion-summary-consumer). +- **`Equinox.CosmosStore` `RollingState` and `Custom` 'non-event-sourced' modes**: Uses 'Tip with Unfolds' encoding to avoid having to write event documents at all - this enables one to build, reason about and test your aggregates in the normal manner, but inhibit event documents from being generated. This enables one to benefit from the caching and consistency management mechanisms without having to bear the cost of writing and storing the events themselves (and/or dealing with an ever-growing store size). 
Search for `transmute` or `RollingState` in the `samples` and/or see [the `Checkpoint` Aggregate in Propulsion](https://github.com/jet/propulsion/blob/master/src/Propulsion.EventStore/Checkpoint.fs). One chief use of this mechanism is for tracking Summary Event feeds in [the `dotnet-templates` `summaryConsumer` template](https://github.com/jet/dotnet-templates/tree/master/propulsion-summary-consumer). ## Components @@ -77,7 +77,7 @@ The components within this repository are delivered as multi-targeted Nuget pack - `Equinox.Core` [![NuGet](https://img.shields.io/nuget/v/Equinox.Core.svg)](https://www.nuget.org/packages/Equinox.Core/): Interfaces and helpers used in realizing the concrete Store implementations, together with the default [`System.Runtime.Caching.Cache`-based] `Cache` implementation . ([depends](https://www.fuget.org/packages/Equinox.Core) on `Equinox`, `System.Runtime.Caching`) - `Equinox.MemoryStore` [![MemoryStore NuGet](https://img.shields.io/nuget/v/Equinox.MemoryStore.svg)](https://www.nuget.org/packages/Equinox.MemoryStore/): In-memory store for integration testing/performance baselining/providing out-of-the-box zero dependency storage for examples. ([depends](https://www.fuget.org/packages/Equinox.MemoryStore) on `Equinox.Core`, `FsCodec`) - `Equinox.EventStore` [![EventStore NuGet](https://img.shields.io/nuget/v/Equinox.EventStore.svg)](https://www.nuget.org/packages/Equinox.EventStore/): Production-strength [EventStoreDB](https://eventstore.org/) Adapter instrumented to the degree necessitated by Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.EventStore) on `Equinox.Core`, `EventStore.Client >= 20.6`, `FSharp.Control.AsyncSeq >= 2.0.23`) -- `Equinox.Cosmos` [![Cosmos NuGet](https://img.shields.io/nuget/v/Equinox.Cosmos.svg)](https://www.nuget.org/packages/Equinox.Cosmos/): Production-strength Azure CosmosDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RU costs, instrumented to the degree necessitated by Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.Cosmos) on `Equinox.Core`, `Microsoft.Azure.Cosmos >= 3.9`, `FsCodec.NewtonsoftJson`, `FSharp.Control.AsyncSeq >= 2.0.23`) +- `Equinox.CosmosStore` [![Cosmos NuGet](https://img.shields.io/nuget/v/Equinox.CosmosStore.svg)](https://www.nuget.org/packages/Equinox.CosmosStore/): Production-strength Azure CosmosDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RU costs, instrumented to the degree necessitated by Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.CosmosStore) on `Equinox.Core`, `Microsoft.Azure.Cosmos >= 3.9`, `FsCodec.NewtonsoftJson`, `FSharp.Control.AsyncSeq >= 2.0.23`) - `Equinox.SqlStreamStore` [![SqlStreamStore NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore/): Production-strength [SqlStreamStore](https://github.com/SQLStreamStore/SQLStreamStore) Adapter derived from `Equinox.EventStore` - provides core facilities (but does not connect to a specific database; see sibling `SqlStreamStore`.* packages). 
([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore) on `Equinox.Core`, `FsCodec`, `SqlStreamStore >= 1.2.0-beta.8`, `FSharp.Control.AsyncSeq`) - `Equinox.SqlStreamStore.MsSql` [![MsSql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MsSql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MsSql/): [SqlStreamStore.MsSql](https://sqlstreamstore.readthedocs.io/en/latest/sqlserver) Sql Server `Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MsSql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MsSql >= 1.2.0-beta.8`) - `Equinox.SqlStreamStore.MySql` [![MySql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MySql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MySql/): `SqlStreamStore.MySql` MySQL Í`Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MySql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MySql >= 1.2.0-beta.8`) @@ -89,7 +89,7 @@ Equinox does not focus on projection logic or wrapping thereof - each store brin - `FsKafka` [![FsKafka NuGet](https://img.shields.io/nuget/v/FsKafka.svg)](https://www.nuget.org/packages/FsKafka/): Wraps `Confluent.Kafka` to provide efficient batched Kafka Producer and Consumer configurations, with basic logging instrumentation. Used in the [`propulsion project kafka`](https://github.com/jet/propulsion#dotnet-tool-provisioning--projections-test-tool) tool command; see [`dotnet new proProjector -k; dotnet new proConsumer` to generate a sample app](https://github.com/jet/dotnet-templates#propulsion-related) using it (see the `BatchedAsync` and `BatchedSync` modules in `Examples.fs`). - `Propulsion` [![Propulsion NuGet](https://img.shields.io/nuget/v/Propulsion.svg)](https://www.nuget.org/packages/Propulsion/): defines a canonical `Propulsion.Streams.StreamEvent` used to interop with `Propulsion.*` in processing pipelines for the `proProjector` and `proSync` templates in the [templates repo](https://github.com/jet/dotnet-templates), together with the `Ingestion`, `Streams`, `Progress` and `Parallel` modules that get composed into those processing pipelines. ([depends](https://www.fuget.org/packages/Propulsion) on `Serilog`) -- `Propulsion.Cosmos` [![Propulsion.Cosmos NuGet](https://img.shields.io/nuget/v/Propulsion.Cosmos.svg)](https://www.nuget.org/packages/Propulsion.Cosmos/): Wraps the [Microsoft .NET `ChangeFeedProcessor` library](https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet) providing a [processor loop](DOCUMENTATION.md#change-feed-processors) that maintains a continuous query loop per CosmosDb Physical Partition (Range) yielding new or updated documents (optionally unrolling events written by `Equinox.Cosmos` for processing or forwarding). Used in the [`propulsion project stats cosmos`](dotnet-tool-provisioning--benchmarking-tool) tool command; see [`dotnet new proProjector` to generate a sample app](#quickstart) using it. 
([depends](https://www.fuget.org/packages/Propulsion.Cosmos) on `Equinox.Cosmos`, `Microsoft.Azure.DocumentDb.ChangeFeedProcessor >= 2.2.5`) +- `Propulsion.Cosmos` [![Propulsion.Cosmos NuGet](https://img.shields.io/nuget/v/Propulsion.Cosmos.svg)](https://www.nuget.org/packages/Propulsion.Cosmos/): Wraps the [Microsoft .NET `ChangeFeedProcessor` library](https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet) providing a [processor loop](DOCUMENTATION.md#change-feed-processors) that maintains a continuous query loop per CosmosDb Physical Partition (Range) yielding new or updated documents (optionally unrolling events written by `Equinox.CosmosStore` for processing or forwarding). Used in the [`propulsion project stats cosmos`](dotnet-tool-provisioning--benchmarking-tool) tool command; see [`dotnet new proProjector` to generate a sample app](#quickstart) using it. ([depends](https://www.fuget.org/packages/Propulsion.Cosmos) on `Equinox.CosmosStore`, `Microsoft.Azure.DocumentDb.ChangeFeedProcessor >= 2.2.5`) - `Propulsion.EventStore` [![Propulsion.EventStore NuGet](https://img.shields.io/nuget/v/Propulsion.EventStore.svg)](https://www.nuget.org/packages/Propulsion.EventStore/) Used in the [`propulsion project es`](dotnet-tool-provisioning--benchmarking-tool) tool command; see [`dotnet new proSync` to generate a sample app](#quickstart) using it. ([depends](https://www.fuget.org/packages/Propulsion.EventStore) on `Equinox.EventStore`) - `Propulsion.Kafka` [![Propulsion.Kafka NuGet](https://img.shields.io/nuget/v/Propulsion.Kafka.svg)](https://www.nuget.org/packages/Propulsion.Kafka/): Provides a canonical `RenderedSpan` that can be used as a default format when projecting events via e.g. the Producer/Consumer pair in `dotnet new proProjector -k; dotnet new proConsumer`. ([depends](https://www.fuget.org/packages/Propulsion.Kafka) on `Newtonsoft.Json >= 11.0.2`, `Propulsion`, `FsKafka`) @@ -586,16 +586,16 @@ The secondary benefit is of course that you have an absolute guarantee there wil `Equinox.SqlStreamStore` implements this scheme too - it's easier to do things like e.g. replace the bodies of snapshot events with `nulls` as a maintenance task in that instance -Initially, `Equinox.Cosmos` implemented the same strategy as the `Equinox.EventStore` (it started as a cut and paste of the it). However the present implementation takes advantage of the fact that in a Document Store, you can ... update documents - thus, snapshots (termed unfolds) are saved in a custom field (it's an array) in the Tip document - every update includes an updated snapshot (which is zipped to save read and write costs) which overwrites the preceding unfolds. You're currently always guaranteed that the snapshots are in sync with the latest event by virtue of how the stored proc writes. A DynamoDb impl would likely follow the same strategy +Initially, `Equinox.CosmosStore` implemented the same strategy as the `Equinox.EventStore` (it started as a cut and paste of the it). However the present implementation takes advantage of the fact that in a Document Store, you can ... update documents - thus, snapshots (termed unfolds) are saved in a custom field (it's an array) in the Tip document - every update includes an updated snapshot (which is zipped to save read and write costs) which overwrites the preceding unfolds. You're currently always guaranteed that the snapshots are in sync with the latest event by virtue of how the stored proc writes. 
A DynamoDb impl would likely follow the same strategy

I expand (too much!) on some more of the considerations in https://github.com/jet/equinox/blob/master/DOCUMENTATION.md

The other thing that should be pointed out is the caching can typically cover a lot of perf stuff as long as stream lengths stay sane - Snapshotting (esp polluting the stream with snapshot events should definitely be toward the bottom of your list of tactics for managing a stream efficiently given long streams are typically a design smell)

-### Changing Access / Representation strategies in `Equinox.Cosmos` - what happens?
+### Changing Access / Representation strategies in `Equinox.CosmosStore` - what happens?

-> Does Equinox adapt the stream if we start writing with `Equinox.Cosmos.AccessStrategy.RollingState` and change to `Snapshotted` for instance? It could take the last RollingState writing and make the first snapshot ?
+> Does Equinox adapt the stream if we start writing with `Equinox.CosmosStore.AccessStrategy.RollingState` and change to `Snapshotted` for instance? It could take the last RollingState writing and make the first snapshot ?

> what about the opposite? It deletes all events and start writing `RollingState` ?

@@ -617,7 +617,7 @@ General rules:
a) load and decode unfolds from tip (followed by events, if and only if necessary)
b) offer the events to an `isOrigin` function to allow us to stop when we've got a start point (a Reset Event, a relevant snapshot, or, failing that, the start of the stream)

-It may be helpful to look at [how an `AccessStrategy` is mapped to `isOrigin`, `toSnapshot` and `transmute` lambdas internally](https://github.com/jet/equinox/blob/74129903e85e01ce584b4449f629bf3e525515ea/src/Equinox.Cosmos/Cosmos.fs#L1029)
+It may be helpful to look at [how an `AccessStrategy` is mapped to `isOrigin`, `toSnapshot` and `transmute` lambdas internally](https://github.com/jet/equinox/blob/master/src/Equinox.CosmosStore/CosmosStore.fs#L1016)

#### Aaand answering the question

@@ -636,7 +636,7 @@ Then, whenever you emit events from a `decide` or `interpret`, the `AccessStrategy` will define what happens to them...:
- write updated unfolds/snapshots
- remove or adjust events before they get passed down to the `sync` stored procedure (`Custom`, `RollingState`, `LatestKnownEvent` modes)

-Ouch, not looking forward to reading all that logic :frown: ? [Have a read, it's really not that :scream:](https://github.com/jet/equinox/blob/74129903e85e01ce584b4449f629bf3e525515ea/src/Equinox.Cosmos/Cosmos.fs#L870).
+Ouch, not looking forward to reading all that logic :frown: ? [Have a read, it's really not that :scream:](https://github.com/jet/equinox/blob/master/src/Equinox.CosmosStore/CosmosStore.fs#L1011).

### OK, but you didn't answer my question, you just talked about stuff you wanted to talk about!
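For readers skimming the FAQ above: the strategies under discussion differ only in the final argument handed to the renamed category. A sketch, under the assumption that `context`, `cache`, `codec` and a `Fold` module are already defined as in the samples elsewhere in this diff:

```fsharp
open Equinox.CosmosStore

// assumed: context : CosmosStoreContext, cache : Equinox.Cache, codec, and a Fold module as in the samples
let caching = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.)

// folds from the start of the stream every time (no unfolds maintained)
let unoptimized  = CosmosStoreCategory(context, codec, Fold.fold, Fold.initial, caching, AccessStrategy.Unoptimized)
// maintains a snapshot as an unfold in the Tip; isOrigin lets reads stop at it
let snapshotted  = CosmosStoreCategory(context, codec, Fold.fold, Fold.initial, caching, AccessStrategy.Snapshot (Fold.isOrigin, Fold.snapshot))
// writes no events at all; only the rolling state is kept (as unfolds) in the Tip
let rollingState = CosmosStoreCategory(context, codec, Fold.fold, Fold.initial, caching, AccessStrategy.RollingState Fold.snapshot)

// changing strategy later does not rewrite history; it only changes which unfolds get maintained and where reads can stop
let resolve = snapshotted.Resolve
```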
diff --git a/build.proj b/build.proj index 99309aa4b..0d5e2e31a 100644 --- a/build.proj +++ b/build.proj @@ -16,7 +16,7 @@ - + diff --git a/diagrams/CosmosCode.puml b/diagrams/CosmosCode.puml index 3f79f20f5..5a93d14f6 100644 --- a/diagrams/CosmosCode.puml +++ b/diagrams/CosmosCode.puml @@ -1,5 +1,5 @@ @startuml -title Code diagram for Equinox.Cosmos Query operation, with empty cache and nothing written to the stream yet +title Code diagram for Equinox.CosmosStore Query operation, with empty cache and nothing written to the stream yet actor Caller order 20 box "Equinox.Stream" @@ -7,7 +7,7 @@ box "Equinox.Stream" end box participant Aggregate order 50 participant Service order 60 -box "Equinox.Cosmos / CosmosDB" +box "Equinox.CosmosStore / CosmosDB" participant IStream order 80 collections Cache order 90 database CosmosDB order 100 @@ -30,7 +30,7 @@ Stream -> Caller: {result = list } @enduml @startuml -title Code diagram for Equinox.Cosmos Transact operation, with cache up to date using Snapshotting Access Strategy +title Code diagram for Equinox.CosmosStore Transact operation, with cache up to date using Snapshotting Access Strategy actor Caller order 20 box "Equinox.Stream" @@ -38,7 +38,7 @@ box "Equinox.Stream" end box participant Aggregate order 50 participant Service order 60 -box "Equinox.Cosmos / CosmosDB" +box "Equinox.CosmosStore / CosmosDB" participant IStream order 80 collections Cache order 90 database CosmosDB order 100 @@ -115,7 +115,7 @@ Stream -> Caller: proposedResult @enduml @startuml -title Code diagram for Equinox.Cosmos Query operation immediately following a Query/Transact on the same node, i.e. cached +title Code diagram for Equinox.CosmosStore Query operation immediately following a Query/Transact on the same node, i.e. cached actor Caller order 20 box "Equinox.Stream" @@ -123,7 +123,7 @@ box "Equinox.Stream" end box participant Aggregate order 50 participant Service order 60 -box "Equinox.Cosmos / CosmosDB" +box "Equinox.CosmosStore / CosmosDB" participant IStream order 80 collections Cache order 90 database CosmosDB order 100 @@ -144,7 +144,7 @@ Aggregate -> Caller: result @enduml @startuml -title Code diagram for Equinox.Cosmos Query operation on a node without an in-sync cached value (with snapshotting Access Strategy) +title Code diagram for Equinox.CosmosStore Query operation on a node without an in-sync cached value (with snapshotting Access Strategy) actor Caller order 20 box "Equinox.Stream" @@ -152,7 +152,7 @@ box "Equinox.Stream" end box participant Aggregate order 50 participant Service order 60 -box "Equinox.Cosmos / CosmosDB" +box "Equinox.CosmosStore / CosmosDB" participant IStream order 80 collections Cache order 90 database CosmosDB order 100 diff --git a/diagrams/CosmosComponent.puml b/diagrams/CosmosComponent.puml index 5bed95a2f..9a996e845 100644 --- a/diagrams/CosmosComponent.puml +++ b/diagrams/CosmosComponent.puml @@ -1,7 +1,7 @@ @startuml !includeurl https://raw.githubusercontent.com/skleanthous/C4-PlantumlSkin/master/build/output/c4.puml -title Component diagram for Equinox.Cosmos (+ Propulsion.Cosmos) +title Component diagram for Equinox.CosmosStore (+ Propulsion.Cosmos) caption Moving parts breakdown for an Equinox app using a CosmosDB container actor "Applications" <> as apps @@ -23,9 +23,9 @@ rectangle "Consistent Processing" <> { ] } frame Store { - rectangle "Equinox.Cosmos" <> { + rectangle "Equinox.CosmosStore" <> { rectangle eqxcosmos <> [ - Equinox.Cosmos + Equinox.CosmosStore ] database memorycache <> [ **System.MemoryCache** diff 
--git a/diagrams/CosmosContainer.puml b/diagrams/CosmosContainer.puml index b3087a6ac..2ed352148 100644 --- a/diagrams/CosmosContainer.puml +++ b/diagrams/CosmosContainer.puml @@ -1,7 +1,7 @@ @startuml !includeurl https://raw.githubusercontent.com/skleanthous/C4-PlantumlSkin/master/build/output/c4.puml -title Container diagram for Equinox.Cosmos (+ Propulsion.Cosmos) +title Container diagram for Equinox.CosmosStore (+ Propulsion.Cosmos) caption Moving parts for an Equinox app using a CosmosDB container that presently has 2 physical partitions actor "Applications" <> as apps @@ -29,7 +29,7 @@ rectangle "Consistent Processing" <> { ] rectangle eqxcosmos <> [ - Equinox.Cosmos + Equinox.CosmosStore ] cloud "CosmosDB Database D" as db { diff --git a/diagrams/container.puml b/diagrams/container.puml index f208b861c..2822de086 100644 --- a/diagrams/container.puml +++ b/diagrams/container.puml @@ -23,7 +23,7 @@ together { frame "Consistent Event Stores" as stores <> { frame "Cosmos" as cosmos <> { - rectangle "Equinox.Cosmos" <> as cs + rectangle "Equinox.CosmosStore" <> as cs rectangle "Propulsion.Cosmos" <> as cr rectangle "Azure.Cosmos" <> as cc } diff --git a/samples/Infrastructure/Infrastructure.fsproj b/samples/Infrastructure/Infrastructure.fsproj index d073373c8..b79e4f928 100644 --- a/samples/Infrastructure/Infrastructure.fsproj +++ b/samples/Infrastructure/Infrastructure.fsproj @@ -19,7 +19,7 @@ - + diff --git a/samples/Infrastructure/Services.fs b/samples/Infrastructure/Services.fs index 2ba19ba9b..6ff9b8ded 100644 --- a/samples/Infrastructure/Services.fs +++ b/samples/Infrastructure/Services.fs @@ -11,10 +11,9 @@ type StreamResolver(storage) = initial: 'state, snapshot: (('event -> bool) * ('state -> 'event))) = match storage with - | Storage.StorageConfig.Cosmos (gateway, caching, unfolds, databaseId, containerId) -> - let store = Equinox.Cosmos.Context(gateway, databaseId, containerId) - let accessStrategy = if unfolds then Equinox.Cosmos.AccessStrategy.Snapshot snapshot else Equinox.Cosmos.AccessStrategy.Unoptimized - Equinox.Cosmos.Resolver<'event,'state,_>(store, codec, fold, initial, caching, accessStrategy).Resolve + | Storage.StorageConfig.Cosmos (store, caching, unfolds) -> + let accessStrategy = if unfolds then Equinox.CosmosStore.AccessStrategy.Snapshot snapshot else Equinox.CosmosStore.AccessStrategy.Unoptimized + Equinox.CosmosStore.CosmosStoreCategory<'event,'state,_>(store, codec, fold, initial, caching, accessStrategy).Resolve | Storage.StorageConfig.Es (context, caching, unfolds) -> let accessStrategy = if unfolds then Equinox.EventStore.AccessStrategy.RollingSnapshots snapshot |> Some else None Equinox.EventStore.Resolver<'event,'state,_>(context, codec, fold, initial, ?caching = caching, ?access = accessStrategy).Resolve diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 35c7d21e1..32ae868a7 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -10,7 +10,7 @@ type StorageConfig = // For MemoryStore, we keep the events as UTF8 arrays - we could use FsCodec.Codec.Box to remove the JSON encoding, which would improve perf but can conceal problems | Memory of Equinox.MemoryStore.VolatileStore | Es of Equinox.EventStore.Context * Equinox.EventStore.CachingStrategy option * unfolds: bool - | Cosmos of Equinox.Cosmos.Gateway * Equinox.Cosmos.CachingStrategy * unfolds: bool * databaseId: string * containerId: string + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * 
Equinox.CosmosStore.CachingStrategy * unfolds: bool | Sql of Equinox.SqlStreamStore.Context * Equinox.SqlStreamStore.CachingStrategy option * unfolds: bool module MemoryStore = @@ -67,22 +67,23 @@ module Cosmos = /// 1) replace connection below with a connection string or Uri+Key for an initialized Equinox instance with a database and collection named "equinox-test" /// 2) Set the 3x environment variables and create a local Equinox using tools/Equinox.Tool/bin/Release/net461/eqx.exe ` /// init -ru 1000 cosmos -s $env:EQUINOX_COSMOS_CONNECTION -d $env:EQUINOX_COSMOS_DATABASE -c $env:EQUINOX_COSMOS_CONTAINER - open Equinox.Cosmos + open Equinox.CosmosStore open Serilog - let private createGateway connection maxItems = Gateway(connection, BatchingPolicy(defaultMaxItems=maxItems)) - let connection (log: ILogger, storeLog: ILogger) (a : Info) = - let (Discovery.UriAndKey (endpointUri,_)) as discovery = a.Connection |> Discovery.FromConnectionString + let conn (log: ILogger) (a : Info) = + let discovery = Discovery.ConnectionString a.Connection + let client = CosmosStoreClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, mode=a.Mode).Create(discovery) log.Information("CosmosDb {mode} {connection} Database {database} Container {container}", - a.Mode, endpointUri, a.Database, a.Container) + a.Mode, client.Endpoint, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in x.TotalSeconds) - discovery, a.Database, a.Container, Connector(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) - let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = - let discovery, dName, cName, connector = connection (log, storeLog) info - let conn = connector.Connect(appName, discovery) |> Async.RunSynchronously + client, a.Database, a.Container + let config (log: ILogger) (cache, unfolds, batchSize) info = + let client, databaseId, containerId = conn log info + let conn = CosmosStoreConnection(client, databaseId, containerId) + let ctx = CosmosStoreContext(conn, defaultMaxItems = batchSize) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) | None -> CachingStrategy.NoCaching - StorageConfig.Cosmos (createGateway conn batchSize, cacheStrategy, unfolds, dName, cName) + StorageConfig.Cosmos (ctx, cacheStrategy, unfolds) /// To establish a local node to run the tests against: /// 1. 
cinst eventstore-oss -y # where cinst is an invocation of the Chocolatey Package Installer on Windows diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index 462d45300..acbcaabfd 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.CartIntegration open Equinox -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -21,9 +21,9 @@ let resolveGesStreamWithoutCustomAccessStrategy gateway = fun (id,opt) -> EventStore.Resolver(gateway, codec, fold, initial).Resolve(id,?option=opt) let resolveCosmosStreamWithSnapshotStrategy gateway = - fun (id,opt) -> Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) + fun (id,opt) -> CosmosStore.CosmosStoreCategory(gateway, codec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) let resolveCosmosStreamWithoutCustomAccessStrategy gateway = - fun (id,opt) -> Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve(id,?option=opt) + fun (id,opt) -> CosmosStore.CosmosStoreCategory(gateway, codec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Unoptimized).Resolve(id,?option=opt) let addAndThenRemoveItemsManyTimesExceptTheLastOne context cartId skuId (service: Backend.Cart.Service) count = service.ExecuteManyAsync(cartId, false, seq { @@ -50,7 +50,7 @@ type Tests(testOutputHelper) = do! act service args } - let arrange connect choose resolve = async { + let arrangeEs connect choose resolve = async { let log = createLog () let! conn = connect log let gateway = choose conn defaultBatchSize @@ -58,24 +58,29 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against EventStore, correctly folding the events without compaction semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToLocalEventStoreNode createGesGateway resolveGesStreamWithoutCustomAccessStrategy + let! service = arrangeEs connectToLocalEventStoreNode createGesGateway resolveGesStreamWithoutCustomAccessStrategy do! act service args } [] let ``Can roundtrip against EventStore, correctly folding the events with RollingSnapshots`` args = Async.RunSynchronously <| async { - let! service = arrange connectToLocalEventStoreNode createGesGateway resolveGesStreamWithRollingSnapshots + let! service = arrangeEs connectToLocalEventStoreNode createGesGateway resolveGesStreamWithRollingSnapshots do! act service args } + let arrangeCosmos connect resolve = + let log = createLog () + let ctx : CosmosStore.CosmosStoreContext = connect log defaultBatchSize + Backend.Cart.create log (resolve ctx) + [] let ``Can roundtrip against Cosmos, correctly folding the events without custom access strategy`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveCosmosStreamWithoutCustomAccessStrategy + let service = arrangeCosmos createPrimaryContext resolveCosmosStreamWithoutCustomAccessStrategy do! act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with With Snapshotting`` args = Async.RunSynchronously <| async { - let! 
service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveCosmosStreamWithSnapshotStrategy + let service = arrangeCosmos createPrimaryContext resolveCosmosStreamWithSnapshotStrategy do! act service args } diff --git a/samples/Store/Integration/CodecIntegration.fs b/samples/Store/Integration/CodecIntegration.fs index 057ae35ce..f5713439a 100644 --- a/samples/Store/Integration/CodecIntegration.fs +++ b/samples/Store/Integration/CodecIntegration.fs @@ -24,7 +24,7 @@ type SimpleDu = | EventD // See JsonConverterTests for why these are ruled out atm //| EventE of int // works but disabled due to Strings and DateTimes not working - //| EventF of string // has wierd semantics, particularly when used with a VerbatimJsonConverter in Equinox.Cosmos + //| EventF of string // has wierd semantics, particularly when used with a VerbatimJsonConverter in Equinox.CosmosStore interface IUnionContract let render = function @@ -46,4 +46,4 @@ let ``Can roundtrip, rendering correctly`` (x: SimpleDu) = render x =! if serialized.Data = null then null else System.Text.Encoding.UTF8.GetString(serialized.Data) let adapted = FsCodec.Core.TimelineEvent.Create(-1L, serialized.EventType, serialized.Data) let deserialized = codec.TryDecode adapted |> Option.get - deserialized =! x \ No newline at end of file + deserialized =! x diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index 178a6158e..b3954a632 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.ContactPreferencesIntegration open Equinox -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -18,13 +18,13 @@ let resolveStreamGesWithOptimizedStorageSemantics gateway = let resolveStreamGesWithoutAccessStrategy gateway = EventStore.Resolver(gateway defaultBatchSize, codec, fold, initial).Resolve -let resolveStreamCosmosWithLatestKnownEventSemantics gateway = - Cosmos.Resolver(gateway 1, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve -let resolveStreamCosmosUnoptimized gateway = - Cosmos.Resolver(gateway defaultBatchSize, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve -let resolveStreamCosmosRollingUnfolds gateway = - let access = Cosmos.AccessStrategy.Custom(Domain.ContactPreferences.Fold.isOrigin, Domain.ContactPreferences.Fold.transmute) - Cosmos.Resolver(gateway defaultBatchSize, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve +let resolveStreamCosmosWithLatestKnownEventSemantics context = + CosmosStore.CosmosStoreCategory(context, codec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.LatestKnownEvent).Resolve +let resolveStreamCosmosUnoptimized context = + CosmosStore.CosmosStoreCategory(context, codec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Unoptimized).Resolve +let resolveStreamCosmosRollingUnfolds context = + let access = CosmosStore.AccessStrategy.Custom(Domain.ContactPreferences.Fold.isOrigin, Domain.ContactPreferences.Fold.transmute) + CosmosStore.CosmosStoreCategory(context, codec, fold, initial, CosmosStore.CachingStrategy.NoCaching, access).Resolve type Tests(testOutputHelper) = let testOutput 
= TestOutputAdapter testOutputHelper @@ -61,20 +61,25 @@ type Tests(testOutputHelper) = do! act service args } + let arrangeCosmos connect resolve batchSize = + let log = createLog () + let ctx: CosmosStore.CosmosStoreContext = connect log batchSize + Backend.ContactPreferences.create log (resolve ctx) + [] let ``Can roundtrip against Cosmos, correctly folding the events with Unoptimized semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosUnoptimized + let service = arrangeCosmos createPrimaryContext resolveStreamCosmosUnoptimized defaultBatchSize do! act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with LatestKnownEvent semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosWithLatestKnownEventSemantics + let service = arrangeCosmos createPrimaryContext resolveStreamCosmosWithLatestKnownEventSemantics 1 do! act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with RollingUnfold semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosRollingUnfolds + let service = arrangeCosmos createPrimaryContext resolveStreamCosmosRollingUnfolds defaultBatchSize do! act service args } diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index da93ce82f..76a06a733 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.FavoritesIntegration open Equinox -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -19,12 +19,12 @@ let createServiceGes gateway log = Backend.Favorites.create log resolver.Resolve let createServiceCosmos gateway log = - let resolver = Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot) + let resolver = CosmosStore.CosmosStoreCategory(gateway, codec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Snapshot snapshot) Backend.Favorites.create log resolver.Resolve let createServiceCosmosRollingState gateway log = - let access = Cosmos.AccessStrategy.RollingState Domain.Favorites.Fold.snapshot - let resolver = Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, access) + let access = CosmosStore.AccessStrategy.RollingState Domain.Favorites.Fold.snapshot + let resolver = CosmosStore.CosmosStoreCategory(gateway, codec, fold, initial, CosmosStore.CachingStrategy.NoCaching, access) Backend.Favorites.create log resolver.Resolve type Tests(testOutputHelper) = @@ -60,17 +60,16 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against Cosmos, correctly folding the events`` args = Async.RunSynchronously <| async { let log = createLog () - let! conn = connectToSpecifiedCosmosOrSimulator log - let gateway = createCosmosContext conn defaultBatchSize - let service = createServiceCosmos gateway log + let store = createPrimaryContext log defaultBatchSize + let service = createServiceCosmos store log do! 
act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with rolling unfolds`` args = Async.RunSynchronously <| async { let log = createLog () - let! conn = connectToSpecifiedCosmosOrSimulator log - let gateway = createCosmosContext conn defaultBatchSize - let service = createServiceCosmosRollingState gateway log + let log = createLog () + let store = createPrimaryContext log defaultBatchSize + let service = createServiceCosmosRollingState store log do! act service args } diff --git a/samples/Store/Integration/Integration.fsproj b/samples/Store/Integration/Integration.fsproj index 6c0d14cfe..d29db5e2b 100644 --- a/samples/Store/Integration/Integration.fsproj +++ b/samples/Store/Integration/Integration.fsproj @@ -18,11 +18,11 @@ - + - + diff --git a/samples/Store/Integration/LogIntegration.fs b/samples/Store/Integration/LogIntegration.fs index c4bf13efb..2dcc1dc25 100644 --- a/samples/Store/Integration/LogIntegration.fs +++ b/samples/Store/Integration/LogIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.LogIntegration open Equinox.Core -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open FSharp.UMX open Swensen.Unquote open System @@ -23,7 +23,7 @@ module EquinoxEsInterop = | Log.Batch (Direction.Backward,c,m) -> "LoadB", m, Some c { action = action; stream = metric.stream; interval = metric.interval; bytes = metric.bytes; count = metric.count; batches = batches } module EquinoxCosmosInterop = - open Equinox.Cosmos.Store + open Equinox.CosmosStore.Core [] type FlatMetric = { action: string; stream : string; interval: StopwatchInterval; bytes: int; count: int; responses: int option; ru: float } with override __.ToString() = sprintf "%s-Stream=%s %s-Elapsed=%O Ru=%O" __.action __.stream __.action __.interval.Elapsed __.ru @@ -65,7 +65,7 @@ type SerilogMetricsExtractor(emit : string -> unit) = logEvent.Properties |> Seq.tryPick (function | KeyValue (k, SerilogScalar (:? Equinox.EventStore.Log.Event as m)) -> Some <| Choice1Of3 (k,m) - | KeyValue (k, SerilogScalar (:? Equinox.Cosmos.Store.Log.Event as m)) -> Some <| Choice2Of3 (k,m) + | KeyValue (k, SerilogScalar (:? Equinox.CosmosStore.Core.Log.Event as m)) -> Some <| Choice2Of3 (k,m) | _ -> None) |> Option.defaultValue (Choice3Of3 ()) let handleLogEvent logEvent = @@ -125,9 +125,8 @@ type Tests() = let batchSize = defaultBatchSize let buffer = ConcurrentQueue() let log = createLoggerWithMetricsExtraction buffer.Enqueue - let! conn = connectToSpecifiedCosmosOrSimulator log - let gateway = createCosmosContext conn batchSize - let service = Backend.Cart.create log (CartIntegration.resolveCosmosStreamWithSnapshotStrategy gateway) + let store = createPrimaryContext log batchSize + let service = Backend.Cart.create log (CartIntegration.resolveCosmosStreamWithSnapshotStrategy store) let itemCount = batchSize / 2 + 1 let cartId = % Guid.NewGuid() do! 
act buffer service itemCount context cartId skuId "EqxCosmos Tip " // one is a 404, one is a 200 diff --git a/samples/Tutorial/AsAt.fsx b/samples/Tutorial/AsAt.fsx index 691ddeba7..f99209554 100644 --- a/samples/Tutorial/AsAt.fsx +++ b/samples/Tutorial/AsAt.fsx @@ -33,7 +33,7 @@ #r "Equinox.EventStore.dll" #r "Microsoft.Azure.Cosmos.Direct.dll" #r "Microsoft.Azure.Cosmos.Client.dll" -#r "Equinox.Cosmos.dll" +#r "Equinox.CosmosStore.dll" open System @@ -124,12 +124,12 @@ module Log = let c = LoggerConfiguration() let c = if verbose then c.MinimumLevel.Debug() else c let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump - let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump + let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump let c = c.WriteTo.Seq("http://localhost:5341") // https://getseq.net let c = c.WriteTo.Console(if verbose then LogEventLevel.Debug else LogEventLevel.Information) c.CreateLogger() let dumpMetrics () = - Equinox.Cosmos.Store.Log.InternalMetrics.dump log + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log Equinox.EventStore.Log.InternalMetrics.dump log let [] appName = "equinox-tutorial" @@ -153,7 +153,7 @@ module EventStore = let resolve id = Equinox.Stream(Log.log, resolver.Resolve(streamName id), maxAttempts = 3) module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let read key = System.Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get let connector = Connector(TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5., log=Log.log, mode=Microsoft.Azure.Cosmos.ConnectionMode.Gateway) @@ -161,7 +161,7 @@ module Cosmos = let context = Context(conn, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching let accessStrategy = AccessStrategy.Snapshot (Fold.isValid,Fold.snapshot) - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let resolver = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) let resolve id = Equinox.Stream(Log.log, resolver.Resolve(streamName id), maxAttempts = 3) let serviceES = Service(EventStore.resolve) diff --git a/samples/Tutorial/Cosmos.fsx b/samples/Tutorial/Cosmos.fsx index 8c9b7942f..4a44cd975 100644 --- a/samples/Tutorial/Cosmos.fsx +++ b/samples/Tutorial/Cosmos.fsx @@ -17,7 +17,7 @@ #r "Microsoft.Azure.Cosmos.Client.dll" #r "System.Net.Http" #r "Serilog.Sinks.Seq.dll" -#r "Equinox.Cosmos.dll" +#r "Equinox.CosmosStore.dll" module Log = @@ -27,11 +27,11 @@ module Log = let log = let c = LoggerConfiguration() let c = if verbose then c.MinimumLevel.Debug() else c - let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump + let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump let c = c.WriteTo.Seq("http://localhost:5341") // https://getseq.net let c = c.WriteTo.Console(if verbose then LogEventLevel.Debug else LogEventLevel.Information) c.CreateLogger() - let dumpMetrics () = Equinox.Cosmos.Store.Log.InternalMetrics.dump log + let dumpMetrics () = Equinox.CosmosStore.Core.Log.InternalMetrics.dump log module Favorites = @@ -82,11 +82,11 @@ module Favorites = module Cosmos = - open Equinox.Cosmos // Everything outside of this module is completely storage agnostic so can be unit tested simply and/or bound to any store + open Equinox.CosmosStore // Everything outside of this module is completely storage agnostic so can be unit tested simply and/or bound to any store let accessStrategy = AccessStrategy.Unoptimized // Or Snapshot etc https://github.com/jet/equinox/blob/master/DOCUMENTATION.md#access-strategies let create (context, cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let resolver = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) create resolver.Resolve let [] appName = "equinox-tutorial" @@ -94,9 +94,9 @@ let [] appName = "equinox-tutorial" module Store = let read key = System.Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get - let connector = Equinox.Cosmos.Connector(System.TimeSpan.FromSeconds 5., 2, System.TimeSpan.FromSeconds 5., log=Log.log) - let conn = connector.Connect(appName, Equinox.Cosmos.Discovery.FromConnectionString (read "EQUINOX_COSMOS_CONNECTION")) |> Async.RunSynchronously - let createContext () = Equinox.Cosmos.Context(conn, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + let connector = Equinox.CosmosStore.Connector(System.TimeSpan.FromSeconds 5., 2, System.TimeSpan.FromSeconds 5., log=Log.log) + let conn = connector.Connect(appName, Equinox.CosmosStore.Discovery.FromConnectionString (read "EQUINOX_COSMOS_CONNECTION")) |> Async.RunSynchronously + let createContext () = Equinox.CosmosStore.Context(conn, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") let context = Store.createContext () let cache = Equinox.Cache(appName, 20) diff --git a/samples/Tutorial/FulfilmentCenter.fsx b/samples/Tutorial/FulfilmentCenter.fsx index 18972d4f7..022b3be58 100644 --- a/samples/Tutorial/FulfilmentCenter.fsx +++ b/samples/Tutorial/FulfilmentCenter.fsx @@ -11,7 +11,7 @@ #r "Microsoft.Azure.Cosmos.Client.dll" #r "System.Net.Http" #r "Serilog.Sinks.Seq.dll" -#r "Equinox.Cosmos.dll" +#r "Equinox.CosmosStore.dll" open FSharp.UMX @@ -103,7 +103,7 @@ module FulfilmentCenter = member __.Read id : Async = read id member __.QueryWithVersion(id, render : Fold.State -> 'res) : Async = queryEx id render -open Equinox.Cosmos +open Equinox.CosmosStore open System module Log = @@ -114,11 +114,11 @@ module Log = let log = let c = LoggerConfiguration() let c = if verbose then c.MinimumLevel.Debug() else c - let c = c.WriteTo.Sink(Store.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump + let c = c.WriteTo.Sink(Core.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump let c = c.WriteTo.Seq("http://localhost:5341") // https://getseq.net let c = c.WriteTo.Console(if verbose then LogEventLevel.Debug else LogEventLevel.Information) c.CreateLogger() - let dumpMetrics () = Store.Log.InternalMetrics.dump log + let dumpMetrics () = Core.Log.InternalMetrics.dump log module Store = @@ -133,7 +133,7 @@ module Store = open FulfilmentCenter -let resolver = Resolver(Store.context, Events.codec, Fold.fold, Fold.initial, Store.cacheStrategy, AccessStrategy.Unoptimized) +let resolver = CosmosStoreCategory(Store.context, Events.codec, Fold.fold, Fold.initial, Store.cacheStrategy, AccessStrategy.Unoptimized) let resolve id = Equinox.Stream(Log.log, resolver.Resolve(streamName id), maxAttempts = 3) let service = Service(resolve) diff --git a/samples/Tutorial/Gapless.fs b/samples/Tutorial/Gapless.fs index 7042b4e10..5d84ea7ce 100644 --- a/samples/Tutorial/Gapless.fs +++ b/samples/Tutorial/Gapless.fs @@ -76,10 +76,10 @@ let [] appName = "equinox-tutorial-gapless" module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let private create (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let resolver = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) let resolve sequenceId = let streamName = streamName sequenceId Equinox.Stream(Serilog.Log.Logger, resolver.Resolve streamName, maxAttempts = 3) diff --git a/samples/Tutorial/Index.fs b/samples/Tutorial/Index.fs index 92bd1c06b..c1a6c9241 100644 --- a/samples/Tutorial/Index.fs +++ b/samples/Tutorial/Index.fs @@ -53,11 +53,11 @@ let create<'t> resolve indexId = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let create<'v> (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let resolver = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) create resolver.Resolve module MemoryStore = diff --git a/samples/Tutorial/Sequence.fs b/samples/Tutorial/Sequence.fs index c69acb510..ebdfb0d34 100644 --- a/samples/Tutorial/Sequence.fs +++ b/samples/Tutorial/Sequence.fs @@ -55,10 +55,10 @@ let create resolve = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let private create (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let resolver = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) create resolver.Resolve module LatestKnownEvent = diff --git a/samples/Tutorial/Set.fs b/samples/Tutorial/Set.fs index b9b5a3ae7..d7e013e5b 100644 --- a/samples/Tutorial/Set.fs +++ b/samples/Tutorial/Set.fs @@ -53,11 +53,11 @@ let create resolve setId = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let create (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let resolver = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) create resolver.Resolve module MemoryStore = diff --git a/samples/Tutorial/Todo.fsx b/samples/Tutorial/Todo.fsx index cc642230a..aeebae2ac 100644 --- a/samples/Tutorial/Todo.fsx +++ b/samples/Tutorial/Todo.fsx @@ -15,7 +15,7 @@ #r "FsCodec.NewtonsoftJson.dll" #r "FSharp.Control.AsyncSeq.dll" #r "Microsoft.Azure.Cosmos.Client.dll" -#r "Equinox.Cosmos.dll" +#r "Equinox.CosmosStore.dll" open System @@ -116,7 +116,7 @@ let log = LoggerConfiguration().WriteTo.Console().CreateLogger() let [] appName = "equinox-tutorial" let cache = Equinox.Cache(appName, 20) -open Equinox.Cosmos +open Equinox.CosmosStore module Store = let read key = Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get @@ -129,7 +129,7 @@ module Store = module TodosCategory = let access = AccessStrategy.Snapshot (isOrigin,snapshot) - let resolver = Resolver(Store.store, codec, fold, initial, Store.cacheStrategy, access=access) + let resolver = CosmosStoreCategory(Store.store, codec, fold, initial, Store.cacheStrategy, access=access) let resolve id = Equinox.Stream(log, resolver.Resolve(streamName id), maxAttempts = 3) let service = Service(TodosCategory.resolve) diff --git a/samples/Tutorial/Tutorial.fsproj b/samples/Tutorial/Tutorial.fsproj index 250f0f066..bb4b8c3a8 100644 --- a/samples/Tutorial/Tutorial.fsproj +++ b/samples/Tutorial/Tutorial.fsproj @@ -24,7 +24,7 @@ - + diff --git a/samples/Tutorial/Upload.fs b/samples/Tutorial/Upload.fs index f7b5b742c..b11ed8c2f 100644 --- a/samples/Tutorial/Upload.fs +++ b/samples/Tutorial/Upload.fs @@ -70,10 +70,10 @@ let create resolve = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let create (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent) + let resolver = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent) create resolver.Resolve module EventStore = diff --git a/samples/Web/Program.fs b/samples/Web/Program.fs index c49531f82..fab9549b7 100644 --- a/samples/Web/Program.fs +++ b/samples/Web/Program.fs @@ -29,7 +29,7 @@ module Program = .Enrich.FromLogContext() .WriteTo.Console() // TOCONSIDER log and reset every minute or something ? 
- .WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) + .WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink()) let c = @@ -41,4 +41,4 @@ module Program = 0 with e -> eprintfn "%s" e.Message - 1 \ No newline at end of file + 1 diff --git a/samples/Web/Startup.fs b/samples/Web/Startup.fs index e896f6f09..51245dd2c 100644 --- a/samples/Web/Startup.fs +++ b/samples/Web/Startup.fs @@ -70,7 +70,7 @@ type Startup() = | Some (Cosmos sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Arguments.VerboseStore log.Information("CosmosDb Storage options: {options:l}", options) - Storage.Cosmos.config (log,storeLog) (cache, unfolds, defaultBatchSize) (Storage.Cosmos.Info sargs), storeLog + Storage.Cosmos.config log (cache, unfolds, defaultBatchSize) (Storage.Cosmos.Info sargs), storeLog | Some (Es sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Arguments.VerboseStore log.Information("EventStore Storage options: {options:l}", options) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.CosmosStore/CosmosStore.fs similarity index 73% rename from src/Equinox.Cosmos/Cosmos.fs rename to src/Equinox.CosmosStore/CosmosStore.fs index 8912c923a..15bf418c2 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -1,12 +1,12 @@ -namespace Equinox.Cosmos.Store +namespace Equinox.CosmosStore.Core open Equinox.Core open FsCodec +open FSharp.Control open Microsoft.Azure.Cosmos open Newtonsoft.Json open Serilog open System -open System.IO /// A single Domain Event from the array held in a Batch type [] @@ -78,6 +78,7 @@ type [] static member internal IndexedFields = [Batch.PartitionKeyField; "i"; "n"] /// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` +[] type Unfold = { /// Base: Stream Position (Version) of State from which this Unfold Event was generated i: int64 @@ -104,7 +105,7 @@ and Base64DeflateUtf8JsonConverter() = let pickle (input : byte[]) : string = if input = null then null else - use output = new MemoryStream() + use output = new System.IO.MemoryStream() use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) compressor.Write(input,0,input.Length) compressor.Close() @@ -113,9 +114,9 @@ and Base64DeflateUtf8JsonConverter() = if str = null then null else let compressedBytes = System.Convert.FromBase64String str - use input = new MemoryStream(compressedBytes) + use input = new System.IO.MemoryStream(compressedBytes) use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) - use output = new MemoryStream() + use output = new System.IO.MemoryStream() decompressor.CopyTo(output) output.ToArray() @@ -159,8 +160,8 @@ type [] static member internal WellKnownDocumentId = "-1" /// Position and Etag to which an operation is relative -type [] - Position = { index: int64; etag: string option } +[] +type Position = { index: int64; etag: string option } module internal Position = /// NB very inefficient compared to FromDocument or using one already returned to you @@ -392,12 +393,13 @@ module private MicrosoftAzureCosmosWrappers = // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens | CosmosException (CosmosStatusCode 
System.Net.HttpStatusCode.PreconditionFailed as e) -> return e.RequestCharge, NotModified } -module Sync = - // NB don't nest in a private module, or serialization will fail miserably ;) - [] - type SyncResponse = { etag: string; n: int64; conflicts: Unfold[] } - let [] private sprocName = "EquinoxRollingUnfolds4" // NB need to rename/number for any breaking change - let [] private sprocBody = """ +// NB don't nest in a private module, or serialization will fail miserably ;) +[] +type SyncResponse = { etag: string; n: int64; conflicts: Unfold[] } + +module internal SyncStoredProc = + let [] name = "EquinoxRollingUnfolds4" // NB need to rename/number for any breaking change + let [] body = """ // Manages the merging of the supplied Request Batch into the stream // 0 perform concurrency check (index=-1 -> always append; index=-2 -> check based on .etag; _ -> check .n=.index) @@ -481,28 +483,37 @@ function sync(req, expIndex, expEtag) { } }""" +[] +type internal SyncExp = Version of int64 | Etag of string | Any + +module internal Sync = + [] type Result = | Written of Position | Conflict of Position * events: ITimelineEvent[] | ConflictUnknown of Position - type [] Exp = Version of int64 | Etag of string | Any let private run (container : Container, stream : string) (exp, req: Tip) : Async = async { - let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd + let ep = + match exp with + | SyncExp.Version ev -> Position.fromI ev + | SyncExp.Etag et -> Position.fromEtag et + | SyncExp.Any -> Position.fromAppendAtEnd let! ct = Async.CancellationToken let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] let! (res : Scripts.StoredProcedureExecuteResponse) = - container.Scripts.ExecuteStoredProcedureAsync(sprocName, PartitionKey stream, args, cancellationToken = ct) |> Async.AwaitTaskCorrect + container.Scripts.ExecuteStoredProcedureAsync(SyncStoredProc.name, PartitionKey stream, args, cancellationToken = ct) |> Async.AwaitTaskCorrect let newPos = { index = res.Resource.n; etag = Option.ofObj res.Resource.etag } return res.RequestCharge, res.Resource.conflicts |> function | null -> Result.Written newPos | [||] when newPos.index = 0L -> Result.Conflict (newPos, Array.empty) | [||] -> Result.ConflictUnknown newPos - | xs -> Result.Conflict (newPos, Enum.Unfolds xs |> Array.ofSeq) } + | xs -> + Result.Conflict (newPos, Enum.Unfolds xs |> Array.ofSeq) } - let private logged (container,stream) (exp : Exp, req: Tip) (log : ILogger) + let private logged (container,stream) (exp : SyncExp, req: Tip) (log : ILogger) : Async = async { let! 
t, (ru, result) = run (container,stream) (exp, req) |> Stopwatch.Time let (Log.BatchLen bytes), count = Enum.Events req, req.e.Length @@ -512,9 +523,9 @@ function sync(req, expIndex, expEtag) { let verbose = log.IsEnabled Serilog.Events.LogEventLevel.Debug (if verbose then log |> Log.propEvents (Enum.Events req) |> Log.propDataUnfolds req.u else log) |> match exp with - | Exp.Etag et -> Log.prop "expectedEtag" et - | Exp.Version ev -> Log.prop "expectedVersion" ev - | Exp.Any -> Log.prop "expectedVersion" -1 + | SyncExp.Etag et -> Log.prop "expectedEtag" et + | SyncExp.Version ev -> Log.prop "expectedVersion" ev + | SyncExp.Any -> Log.prop "expectedVersion" -1 |> match result with | Result.Written pos -> Log.prop "nextExpectedVersion" pos >> Log.event (Log.SyncSuccess (mkMetric ru)) @@ -527,8 +538,8 @@ function sync(req, expIndex, expEtag) { "Sync", stream, count, req.u.Length, (let e = t.Elapsed in e.TotalMilliseconds), ru, bytes, exp) return result } - let batch (log : ILogger) retryPolicy containerStream batch: Async = - let call = logged containerStream batch + let batch (log : ILogger) retryPolicy containerStream expBatch: Async = + let call = logged containerStream expBatch Log.withLoggedRetries retryPolicy "writeAttempt" call log let mkBatch (stream: string) (events: IEventData<_>[]) unfolds: Tip = { p = stream; id = Tip.WellKnownDocumentId; n = -1L(*Server-managed*); i = -1L(*Server-managed*); _etag = null @@ -537,76 +548,86 @@ function sync(req, expIndex, expEtag) { let mkUnfold baseIndex (unfolds: IEventData<_> seq) : Unfold seq = unfolds |> Seq.mapi (fun offset x -> { i = baseIndex + int64 offset; c = x.EventType; d = x.Data; m = x.Meta; t = DateTimeOffset.UtcNow } : Unfold) - module Initialization = - type [] Provisioning = Container of rus: int | Database of rus: int - let adjustOfferC (c:Container) (rus : int) = async { - let! ct = Async.CancellationToken - let! _ = c.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } - let adjustOfferD (d:Database) (rus : int) = async { - let! ct = Async.CancellationToken - let! _ = d.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } - let private createDatabaseIfNotExists (client:CosmosClient) dName maybeRus = async { - let! ct = Async.CancellationToken - let! dbr = client.CreateDatabaseIfNotExistsAsync(id=dName, throughput = Option.toNullable maybeRus, cancellationToken=ct) |> Async.AwaitTaskCorrect - return dbr.Database } - let private createOrProvisionDatabase (client:CosmosClient) dName mode = async { - match mode with - | Provisioning.Database rus -> - let! db = createDatabaseIfNotExists client dName (Some rus) - do! adjustOfferD db rus - | Provisioning.Container _ -> - let! _ = createDatabaseIfNotExists client dName None in () } - let private createContainerIfNotExists (d:Database) (cp:ContainerProperties) maybeRus = async { - let! ct = Async.CancellationToken - let! c = d.CreateContainerIfNotExistsAsync(cp, throughput=Option.toNullable maybeRus, cancellationToken=ct) |> Async.AwaitTaskCorrect - return c.Container } - let private createOrProvisionContainer (d:Database) (cp:ContainerProperties) mode = async { - match mode with - | Provisioning.Database _ -> - return! createContainerIfNotExists d cp None - | Provisioning.Container rus -> - let! c = createContainerIfNotExists d cp (Some rus) - do! adjustOfferC c rus - return c } - let private createStoredProcIfNotExists (c:Container) (name, body): Async = async { - try let! 
r = c.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(id=name, body=body)) |> Async.AwaitTaskCorrect - return r.RequestCharge - with CosmosException ((CosmosStatusCode sc) as e) when sc = System.Net.HttpStatusCode.Conflict -> return e.RequestCharge } - let private mkContainerProperties containerName partitionKeyFieldName = - ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" partitionKeyFieldName) - let private createBatchAndTipContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = - let def = mkContainerProperties cName Batch.PartitionKeyField - def.IndexingPolicy.IndexingMode <- IndexingMode.Consistent - def.IndexingPolicy.Automatic <- true - // Can either do a blacklist or a whitelist - // Given how long and variable the blacklist would be, we whitelist instead - def.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) - // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors - for k in Batch.IndexedFields do def.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) - createOrProvisionContainer (client.GetDatabase dName) def mode - let createSyncStoredProcIfNotExists (log: ILogger option) container = async { - let! t, ru = createStoredProcIfNotExists container (sprocName,sprocBody) |> Stopwatch.Time - match log with - | None -> () - | Some log -> log.Information("Created stored procedure {sprocId} in {ms}ms rc={ru}", sprocName, (let e = t.Elapsed in e.TotalMilliseconds), ru) } - let private createAuxContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = - let def = mkContainerProperties cName "id" // as per Cosmos team, Partition Key must be "/id" - // TL;DR no indexing of any kind; see https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet/issues/142 - def.IndexingPolicy.Automatic <- false - def.IndexingPolicy.IndexingMode <- IndexingMode.None - createOrProvisionContainer (client.GetDatabase dName) def mode - let init log (client: CosmosClient) (dName,cName) mode skipStoredProc = async { - do! createOrProvisionDatabase client dName mode - let! container = createBatchAndTipContainerIfNotExists client (dName,cName) mode - if not skipStoredProc then - do! createSyncStoredProcIfNotExists (Some log) container } - let initAux (client: CosmosClient) (dName,cName) rus = async { - // Hardwired for now (not sure if CFP can store in a Database-allocated as it would need to be supplying partion keys) - let mode = Provisioning.Container rus - do! createOrProvisionDatabase client dName mode - return! createAuxContainerIfNotExists client (dName,cName) mode } +module Initialization = + + type [] Provisioning = Container of rus: int | Database of rus: int + let adjustOfferC (c:Container) (rus : int) = async { + let! ct = Async.CancellationToken + let! _ = c.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } + let adjustOfferD (d:Database) (rus : int) = async { + let! ct = Async.CancellationToken + let! _ = d.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } + let private createDatabaseIfNotExists (client:CosmosClient) dName maybeRus = async { + let! ct = Async.CancellationToken + let! 
dbr = client.CreateDatabaseIfNotExistsAsync(id=dName, throughput = Option.toNullable maybeRus, cancellationToken=ct) |> Async.AwaitTaskCorrect + return dbr.Database } + let private createOrProvisionDatabase (client:CosmosClient) dName mode = async { + match mode with + | Provisioning.Database rus -> + let! db = createDatabaseIfNotExists client dName (Some rus) + do! adjustOfferD db rus + | Provisioning.Container _ -> + let! _ = createDatabaseIfNotExists client dName None in () } + let private createContainerIfNotExists (d:Database) (cp:ContainerProperties) maybeRus = async { + let! ct = Async.CancellationToken + let! c = d.CreateContainerIfNotExistsAsync(cp, throughput=Option.toNullable maybeRus, cancellationToken=ct) |> Async.AwaitTaskCorrect + return c.Container } + let private createOrProvisionContainer (d:Database) (cp:ContainerProperties) mode = async { + match mode with + | Provisioning.Database _ -> + return! createContainerIfNotExists d cp None + | Provisioning.Container rus -> + let! c = createContainerIfNotExists d cp (Some rus) + do! adjustOfferC c rus + return c } + let private createStoredProcIfNotExists (c:Container) (name, body): Async = async { + try let! r = c.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(id=name, body=body)) |> Async.AwaitTaskCorrect + return r.RequestCharge + with CosmosException ((CosmosStatusCode sc) as e) when sc = System.Net.HttpStatusCode.Conflict -> return e.RequestCharge } + let private mkContainerProperties containerName partitionKeyFieldName = + ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" partitionKeyFieldName) + let private createBatchAndTipContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = + let def = mkContainerProperties cName Batch.PartitionKeyField + def.IndexingPolicy.IndexingMode <- IndexingMode.Consistent + def.IndexingPolicy.Automatic <- true + // Can either do a blacklist or a whitelist + // Given how long and variable the blacklist would be, we whitelist instead + def.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) + // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors + for k in Batch.IndexedFields do def.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) + createOrProvisionContainer (client.GetDatabase dName) def mode + let createSyncStoredProcIfNotExists (log: ILogger option) container = async { + let! t, ru = createStoredProcIfNotExists container (SyncStoredProc.name,SyncStoredProc.body) |> Stopwatch.Time + match log with + | None -> () + | Some log -> log.Information("Created stored procedure {procName} in {ms}ms {ru}RU", SyncStoredProc.name, (let e = t.Elapsed in e.TotalMilliseconds), ru) } + let private createAuxContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = + let def = mkContainerProperties cName "id" // as per Cosmos team, Partition Key must be "/id" + // TL;DR no indexing of any kind; see https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet/issues/142 + def.IndexingPolicy.Automatic <- false + def.IndexingPolicy.IndexingMode <- IndexingMode.None + createOrProvisionContainer (client.GetDatabase dName) def mode + let init log (client: CosmosClient) (dName,cName) mode skipStoredProc = async { + do! createOrProvisionDatabase client dName mode + let! container = createBatchAndTipContainerIfNotExists client (dName,cName) mode + if not skipStoredProc then + do! 
createSyncStoredProcIfNotExists (Some log) container } + let initAux (client: CosmosClient) (dName,cName) rus = async { + // Hardwired for now (not sure if CFP can store in a Database-allocated as it would need to be supplying partition keys) + let mode = Provisioning.Container rus + do! createOrProvisionDatabase client dName mode + return! createAuxContainerIfNotExists client (dName,cName) mode } + + // Holds Container state, coordinating initialization activities + type internal ContainerInitializerGuard(container : Container, ?initContainer : Container -> Async) = + let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init container)) + + member __.Container = container + member internal __.InitializationGate = match initGuard with Some g when not (g.IsValid()) -> Some g.AwaitValue | _ -> None + module internal Tip = + let private get (container : Container, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatchEtag=etag) | _ -> null container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId, ro) @@ -635,10 +656,11 @@ module internal Tip = match res with | ReadResult.NotModified -> return Result.NotModified | ReadResult.NotFound -> return Result.NotFound - | ReadResult.Found tip -> return Result.Found (Position.fromTip tip, Enum.EventsAndUnfolds tip |> Array.ofSeq) } + | ReadResult.Found tip -> + return Result.Found (Position.fromTip tip, Enum.EventsAndUnfolds tip |> Array.ofSeq) } module internal Query = - open FSharp.Control + let private mkQuery (container : Container, stream: string) maxItems (direction: Direction) startPos : FeedIterator= let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId @@ -648,7 +670,7 @@ module internal Tip = | Some { index = positionSoExclusiveWhenBackward } -> let cond = if direction = Direction.Forward then "c.n > @startPos" else "c.i < @startPos" QueryDefinition(sprintf "%s AND %s %s" root cond tail).WithParameter("@startPos", positionSoExclusiveWhenBackward) - let qro = new QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) + let qro = QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) container.GetItemQueryIterator(query, requestOptions = qro) // Unrolls the Batches in a response - note when reading backwards, the events are emitted in reverse order of index @@ -670,12 +692,12 @@ module internal Tip = return events, maybePosition, ru } let private run (log : ILogger) (readSlice: FeedIterator -> ILogger -> Async[] * Position option * float>) - (maxPermittedBatchReads: int option) + (maxRequests: int option) (query: FeedIterator) : AsyncSeq[] * Position option * float> = let rec loop batchCount : AsyncSeq[] * Position option * float> = asyncSeq { - match maxPermittedBatchReads with - | Some mpbr when batchCount >= mpbr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" + match maxRequests with + | Some mr when batchCount >= mr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () let batchLog = log |> Log.prop "batchIndex" batchCount @@ -727,15 +749,15 @@ module internal Tip = log.Information("EqxCosmos Stop stream={stream} at={index} {case} used={used} residual={residual}", stream, x.Index, x.EventType, used, residual) false - | _ -> true) (*continue the search*) + | _ -> true) |> AsyncSeq.toArrayAsync return events, maybeTipPos, ru } 
let query = mkQuery (container,stream) maxItems direction startPos let pullSlice = handleResponse direction stream startPos let retryingLoggingReadSlice query = Log.withLoggedRetries retryPolicy "readAttempt" (pullSlice query) let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream - let readlog = log |> Log.prop "direction" direction - let batches : AsyncSeq[] * Position option * float> = run readlog retryingLoggingReadSlice maxRequests query + let readLog = log |> Log.prop "direction" direction + let batches : AsyncSeq[] * Position option * float> = run readLog retryingLoggingReadSlice maxRequests query let! t, (events, maybeTipPos, ru) = mergeBatches log batches |> Stopwatch.Time let raws, decoded = (Array.map fst events), (events |> Seq.choose snd |> Array.ofSeq) let pos = match maybeTipPos with Some p -> p | None -> Position.fromMaxIndex raws @@ -754,16 +776,16 @@ module internal Tip = let mutable ru = 0. let allSlices = ResizeArray() let startTicks = System.Diagnostics.Stopwatch.GetTimestamp() - try let readlog = log |> Log.prop "direction" direction + try let readLog = log |> Log.prop "direction" direction let mutable ok = true while ok do incr responseCount match maxRequests with - | Some mpbr when !responseCount >= mpbr -> readlog.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" + | Some mr when !responseCount >= mr -> readLog.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () - let batchLog = readlog |> Log.prop "batchIndex" !responseCount + let batchLog = readLog |> Log.prop "batchIndex" !responseCount let! (slice,_pos,rus) = retryingLoggingReadSlice query batchLog ru <- ru + rus allSlices.AddRange(slice) @@ -791,8 +813,6 @@ module internal Tip = // Note: public so BatchIndices can be deserialized into module Delete = - open FSharp.Control - type BatchIndices = { id : string; i : int64; n : int64 } let pruneBefore (log: ILogger) (container: Container, stream: string) maxItems beforePos : Async = async { @@ -891,40 +911,27 @@ module Delete = return eventsDeleted, eventsDeferred, lwm } -type [] Token = { container: Container; stream: string; pos: Position } -module Token = - let create (container,stream) pos : StreamToken = - { value = box { container = container; stream = stream; pos = pos } - version = pos.index } - let (|Unpack|) (token: StreamToken) : Container*string*Position = let t = unbox token.value in t.container,t.stream,t.pos - let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = +type [] Token = { stream: string; pos: Position } +module internal Token = + + let create stream pos : StreamToken = { value = box { stream = stream; pos = pos }; version = pos.index } + let (|Unpack|) (token: StreamToken) : string*Position = let t = unbox token.value in t.stream,t.pos + let supersedes (Unpack (_,currentPos)) (Unpack (_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag newVersion > currentVersion || currentETag <> newETag [] module Internal = + [] type InternalSyncResult = Written of StreamToken | ConflictUnknown of StreamToken | Conflict of StreamToken * ITimelineEvent[] [] type LoadFromTokenResult<'event> = Unchanged | Found of StreamToken * 'event[] -namespace Equinox.Cosmos - -open Equinox -open Equinox.Core -open Equinox.Cosmos.Store -open FsCodec -open FSharp.Control -open Microsoft.Azure.Cosmos -open Serilog -open System -open System.Collections.Concurrent - /// Defines policies for retrying with respect to transient 
failures calling CosmosDb (as opposed to application level concurrency conflicts) -type Connection(client: CosmosClient, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = - member __.Client = client +type RetryPolicy([]?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = member __.TipRetryPolicy = readRetryPolicy member __.QueryRetryPolicy = readRetryPolicy member __.WriteRetryPolicy = writeRetryPolicy @@ -943,7 +950,7 @@ type BatchingPolicy /// Maximum number of trips to permit when slicing the work into multiple responses based on `MaxItems` member __.MaxRequests = maxRequests -type Gateway(conn : Connection, batching : BatchingPolicy) = +type internal StoreClient(container : Container, batching : BatchingPolicy, retry : RetryPolicy) = let (|FromUnfold|_|) (tryDecode: #IEventData<_> -> 'event option) (isOrigin: 'event -> bool) (xs:#IEventData<_>[]) : Option<'event[]> = let items = ResizeArray() let isOrigin' e = @@ -955,96 +962,87 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = match Array.tryFindIndexBack isOrigin' xs with | None -> None | Some _ -> items.ToArray() |> Some - member __.Client = conn.Client - member __.LoadBackwardsStopping log (container, stream) (tryDecode,isOrigin): Async = async { - let! pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests Direction.Backward None (tryDecode,isOrigin) + member __.LoadBackwardsStopping(log, stream, (tryDecode,isOrigin)): Async = async { + let! pos, events = Query.walk log (container,stream) retry.QueryRetryPolicy batching.MaxItems batching.MaxRequests Direction.Backward None (tryDecode,isOrigin) Array.Reverse events - return Token.create (container,stream) pos, events } - member __.Read log (container,stream) direction startPos (tryDecode,isOrigin) : Async = async { - let! pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) - return Token.create (container,stream) pos, events } - member __.ReadLazy (batching: BatchingPolicy) log (container,stream) direction startPos (tryDecode,isOrigin) : AsyncSeq<'event[]> = - Query.walkLazy log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) - member __.LoadFromUnfoldsOrRollingSnapshots log (containerStream,maybePos) (tryDecode,isOrigin): Async = async { - let! res = Tip.tryLoad log conn.TipRetryPolicy containerStream maybePos - match res with - | Tip.Result.NotFound -> return Token.create containerStream Position.fromKnownEmpty, Array.empty + return Token.create stream pos, events } + member __.Read(log, stream, direction, (tryDecode,isOrigin), startPos) : Async = async { + let! pos, events = Query.walk log (container,stream) retry.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) + return Token.create stream pos, events } + member __.ReadLazy(log, batching: BatchingPolicy, stream, direction, startPos, (tryDecode,isOrigin)) : AsyncSeq<'event[]> = + Query.walkLazy log (container,stream) retry.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) + member __.LoadFromUnfoldsOrRollingSnapshots(log, (stream,maybePos), (tryDecode,isOrigin)): Async = async { + match! 
Tip.tryLoad log retry.TipRetryPolicy (container,stream) maybePos with + | Tip.Result.NotFound -> return Token.create stream Position.fromKnownEmpty, Array.empty | Tip.Result.NotModified -> return invalidOp "Not handled" - | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return Token.create containerStream pos, span - | _ -> return! __.LoadBackwardsStopping log containerStream (tryDecode,isOrigin) } - member __.GetPosition(log, containerStream, ?pos): Async = async { - let! res = Tip.tryLoad log conn.TipRetryPolicy containerStream pos - match res with - | Tip.Result.NotFound -> return Token.create containerStream Position.fromKnownEmpty - | Tip.Result.NotModified -> return Token.create containerStream pos.Value - | Tip.Result.Found (pos, _unfoldsAndEvents) -> return Token.create containerStream pos } - member __.LoadFromToken(log, (container,stream,pos), (tryDecode, isOrigin)): Async> = async { - let! res = Tip.tryLoad log conn.TipRetryPolicy (container,stream) (Some pos) - match res with - | Tip.Result.NotFound -> return LoadFromTokenResult.Found (Token.create (container,stream) Position.fromKnownEmpty,Array.empty) + | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return Token.create stream pos, span + | _ -> return! __.LoadBackwardsStopping(log, stream, (tryDecode,isOrigin)) } + member __.GetPosition(log, stream, ?pos): Async = async { + match! Tip.tryLoad log retry.TipRetryPolicy (container,stream) pos with + | Tip.Result.NotFound -> return Token.create stream Position.fromKnownEmpty + | Tip.Result.NotModified -> return Token.create stream pos.Value + | Tip.Result.Found (pos, _unfoldsAndEvents) -> return Token.create stream pos } + member __.LoadFromToken(log, (stream,pos), (tryDecode, isOrigin)): Async> = async { + match! Tip.tryLoad log retry.TipRetryPolicy (container,stream) (Some pos) with + | Tip.Result.NotFound -> return LoadFromTokenResult.Found (Token.create stream Position.fromKnownEmpty,Array.empty) | Tip.Result.NotModified -> return LoadFromTokenResult.Unchanged - | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return LoadFromTokenResult.Found (Token.create (container,stream) pos, span) - | _ -> let! res = __.Read log (container,stream) Direction.Forward (Some pos) (tryDecode,isOrigin) + | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return LoadFromTokenResult.Found (Token.create stream pos, span) + | _ -> let! res = __.Read(log, stream, Direction.Forward, (tryDecode,isOrigin), (Some pos)) return LoadFromTokenResult.Found res } - member __.CreateSyncStoredProcIfNotExists log container = - Sync.Initialization.createSyncStoredProcIfNotExists log container - member __.Sync log containerStream (exp, batch: Tip): Async = async { + member __.Sync(log, stream, exp : SyncExp, batch: Tip): Async = async { if Array.isEmpty batch.e && Array.isEmpty batch.u then invalidOp "Must write either events or unfolds." - let! wr = Sync.batch log conn.WriteRetryPolicy containerStream (exp,batch) - match wr with - | Sync.Result.Conflict (pos',events) -> return InternalSyncResult.Conflict (Token.create containerStream pos',events) - | Sync.Result.ConflictUnknown pos' -> return InternalSyncResult.ConflictUnknown (Token.create containerStream pos') - | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } - member __.Prune(log, (container, stream), beforeIndex) = + match! 
Sync.batch log retry.WriteRetryPolicy (container,stream) (exp,batch) with + | Sync.Result.Conflict (pos',events) -> return InternalSyncResult.Conflict (Token.create stream pos',events) + | Sync.Result.ConflictUnknown pos' -> return InternalSyncResult.ConflictUnknown (Token.create stream pos') + | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create stream pos') } + member __.Prune(log, stream, beforeIndex) = Delete.pruneBefore log (container, stream) batching.MaxItems beforeIndex -type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEventCodec<'event,byte[],'context>) = +type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IEventCodec<'event,byte[],'context>) = let (|TryDecodeFold|) (fold: 'state -> 'event seq -> 'state) initial (events: ITimelineEvent seq) : 'state = Seq.choose codec.TryDecode events |> fold initial - member __.Load includeUnfolds containerStream fold initial isOrigin (log : ILogger): Async = async { + member __.Load(log, stream, initial, includeUnfolds, fold, isOrigin): Async = async { let! token, events = - if not includeUnfolds then gateway.LoadBackwardsStopping log containerStream (codec.TryDecode,isOrigin) - else gateway.LoadFromUnfoldsOrRollingSnapshots log (containerStream,None) (codec.TryDecode,isOrigin) + if not includeUnfolds then store.LoadBackwardsStopping(log, stream, (codec.TryDecode,isOrigin)) + else store.LoadFromUnfoldsOrRollingSnapshots(log, (stream,None), (codec.TryDecode,isOrigin)) return token, fold initial events } - member __.LoadFromToken (Token.Unpack streamPos, state: 'state as current) fold isOrigin (log : ILogger): Async = async { - let! res = gateway.LoadFromToken(log, streamPos, (codec.TryDecode,isOrigin)) - match res with - | LoadFromTokenResult.Unchanged -> return current - | LoadFromTokenResult.Found (token', events') -> return token', fold state events' } - member __.Sync(Token.Unpack (container,stream,pos), state as current, events, mapUnfolds, fold, isOrigin, log, context): Async> = async { + member __.LoadFromToken(log, (Token.Unpack (stream, pos) as streamToken), state: 'state, fold, isOrigin): Async = async { + match! 
store.LoadFromToken(log, (stream, pos), (codec.TryDecode,isOrigin)) with + | LoadFromTokenResult.Unchanged -> return streamToken, state + | LoadFromTokenResult.Found (token', events) -> return token', fold state events } + member __.Sync(log, token, state, events, mapUnfolds, fold, isOrigin, context): Async> = async { let state' = fold state (Seq.ofList events) - let encode e = codec.Encode(context,e) + let encode e = codec.Encode(context, e) + let (Token.Unpack (stream,pos)) = token let exp,events,eventsEncoded,projectionsEncoded = match mapUnfolds with - | Choice1Of3 () -> Sync.Exp.Version pos.index, events, Seq.map encode events |> Array.ofSeq, Seq.empty - | Choice2Of3 unfold -> Sync.Exp.Version pos.index, events, Seq.map encode events |> Array.ofSeq, Seq.map encode (unfold events state') + | Choice1Of3 () -> SyncExp.Version pos.index, events, Seq.map encode events |> Array.ofSeq, Seq.empty + | Choice2Of3 unfold -> SyncExp.Version pos.index, events, Seq.map encode events |> Array.ofSeq, Seq.map encode (unfold events state') | Choice3Of3 transmute -> let events', unfolds = transmute events state' - Sync.Exp.Etag (defaultArg pos.etag null), events', Seq.map encode events' |> Array.ofSeq, Seq.map encode unfolds + SyncExp.Etag (defaultArg pos.etag null), events', Seq.map encode events' |> Array.ofSeq, Seq.map encode unfolds let baseIndex = pos.index + int64 (List.length events) let projections = Sync.mkUnfold baseIndex projectionsEncoded let batch = Sync.mkBatch stream eventsEncoded projections - let! res = gateway.Sync log (container,stream) (exp,batch) - match res with + match! store.Sync(log, stream, exp, batch) with | InternalSyncResult.Conflict (token',TryDecodeFold fold state events') -> return SyncResult.Conflict (async { return token', events' }) - | InternalSyncResult.ConflictUnknown _token' -> return SyncResult.Conflict (__.LoadFromToken current fold isOrigin log) + | InternalSyncResult.ConflictUnknown _token' -> return SyncResult.Conflict (__.LoadFromToken(log, token, state, fold, isOrigin)) | InternalSyncResult.Written token' -> return SyncResult.Written (token', state') } -module Caching = +module internal Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, Container*string,'context>, tee : string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! tokenAndState = load return! intercept streamName tokenAndState } - interface ICategory<'event, 'state, Container*string, 'context> with - member __.Load(log, (container,streamName), opt) : Async = - loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName - member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context) + interface ICategory<'event, 'state, string, 'context> with + member __.Load(log, streamName, opt) : Async = + loadAndIntercept (inner.Load(log, streamName, opt)) streamName + member __.TrySync(log : ILogger, (Token.Unpack (stream,_) as streamToken), state, events : 'event list, context) : Async> = async { - let! syncRes = inner.TrySync(log, streamToken, state, events, context) - match syncRes with + match! 
inner.TrySync(log, streamToken, state, events, context) with | SyncResult.Conflict resync -> return SyncResult.Conflict(loadAndIntercept resync stream) | SyncResult.Written(token', state') -> let! intercepted = intercept stream (token', state') @@ -1054,70 +1052,92 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, Container*string, 'context>) - : ICategory<'event, 'state, Container*string, 'context> = + (category : ICategory<'event, 'state, string, 'context>) + : ICategory<'event, 'state, string, 'context> = let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) CategoryTee<'event, 'state, 'context>(category, addOrUpdateSlidingExpirationCacheEntry) :> _ -type private Folder<'event, 'state, 'context> +type internal Folder<'event, 'state, 'context> ( category: Category<'event, 'state, 'context>, fold: 'state -> 'event seq -> 'state, initial: 'state, isOrigin: 'event -> bool, mapUnfolds: Choice 'state -> 'event seq),('event list -> 'state -> 'event list * 'event list)>, ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true - let batched log containerStream = category.Load inspectUnfolds containerStream fold initial isOrigin log - interface ICategory<'event, 'state, Container*string, 'context> with - member __.Load(log, (container,streamName), opt): Async = + let batched log stream = category.Load(log, stream, initial, inspectUnfolds, fold, isOrigin) + interface ICategory<'event, 'state, string, 'context> with + member __.Load(log, streamName, opt): Async = match readCache with - | None -> batched log (container,streamName) + | None -> batched log streamName | Some (cache : ICache, prefix : string) -> async { match! cache.TryGet(prefix + streamName) with - | None -> return! batched log (container,streamName) - | Some tokenAndState when opt = Some AllowStale -> return tokenAndState - | Some tokenAndState -> return! category.LoadFromToken tokenAndState fold isOrigin log } + | None -> return! batched log streamName + | Some tokenAndState when opt = Some Equinox.AllowStale -> return tokenAndState + | Some (token, state) -> return! category.LoadFromToken(log, token, state, fold, isOrigin) } member __.TrySync(log : ILogger, streamToken, state, events : 'event list, context) : Async> = async { - let! res = category.Sync((streamToken,state), events, mapUnfolds, fold, isOrigin, log, context) - match res with + match! 
category.Sync(log, streamToken, state, events, mapUnfolds, fold, isOrigin, context) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } -/// Holds Container state, coordinating initialization activities -type private ContainerWrapper(container : Container, ?initContainer : Container -> Async) = - let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init container)) +namespace Equinox.CosmosStore - member __.Container = container - member internal __.InitializationGate = match initGuard with Some g when not (g.IsValid()) -> Some g.AwaitValue | _ -> None +open Equinox +open Equinox.Core +open Equinox.CosmosStore.Core +open FsCodec +open Microsoft.Azure.Cosmos +open Serilog +open System -/// Defines a process for mapping from a Stream Name to the appropriate storage area, allowing control over segregation / co-locating of data -type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> string*string*string, []?disableInitialization) = +/// Holds all relevant state for a Store within a given CosmosDB Database +/// - The CosmosDB CosmosClient (there should be a single one of these per process, plus an optional fallback one for pruning scenarios) +/// - The (singleton) per Container Stored Procedure initialization state +type CosmosStoreConnection + ( /// Facilitates custom mapping of Stream Category Name to underlying Cosmos Database/Container names + categoryAndStreamNameToDatabaseContainerStream : string * string -> string * string * string, + createContainer : string * string -> Container, + /// Admits a hook to enable customization of how Equinox.CosmosStore handles the low level interactions with the underlying CosmosContainer. 
+ []?createGateway, + /// Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time + []?disableInitialization) = + let createGateway = match createGateway with Some creator -> creator | None -> id // Index of database*collection -> Initialization Context - let wrappers = ConcurrentDictionary() - new (databaseId, containerId) = - // TOCONSIDER - this works to support the Core.Events APIs - let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId - Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - - member internal __.Resolve(client : CosmosClient, categoryName, id, init) : (Container*string) * (unit -> Async) option = - let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id - let init = match disableInitialization with Some true -> None | _ -> Some init - let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun (d,c) -> ContainerWrapper(client.GetContainer(d, c), ?initContainer = init)) - (wrapped.Container,streamName),wrapped.InitializationGate - -/// Pairs a Gateway, defining the retry policies for CosmosDb with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) -type Context(gateway: Gateway, containers: Containers, [] ?log) = - let init = gateway.CreateSyncStoredProcIfNotExists log - new(gateway: Gateway, databaseId: string, containerId: string, []?log) = - Context(gateway, Containers(databaseId, containerId), ?log = log) - new(connection: Connection, databaseId: string, containerId: string, []?log) = - Context(Gateway(connection, BatchingPolicy()), databaseId, containerId, ?log = log) - - member __.Gateway = gateway - member __.Containers = containers - member internal __.ResolveContainerStream(categoryName, id) : (Container*string) * (unit -> Async) option = - containers.Resolve(gateway.Client, categoryName, id, init) + let containerInitGuards = System.Collections.Concurrent.ConcurrentDictionary() + new(client, databaseId : string, containerId : string, + /// Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time + []?disableInitialization, + /// Admits a hook to enable customization of how Equinox.CosmosStore handles the low level interactions with the underlying CosmosContainer. 
+ []?createGateway : Container -> Container) = + let genStreamName (categoryName, streamId) = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId + let catAndStreamToDatabaseContainerStream (categoryName, streamId) = databaseId, containerId, genStreamName (categoryName, streamId) + let primaryContainer (d, c) = (client : CosmosClient).GetDatabase(d).GetContainer(c) + CosmosStoreConnection(catAndStreamToDatabaseContainerStream, primaryContainer, + ?disableInitialization=disableInitialization, ?createGateway=createGateway) + member internal __.ResolveContainerGuardAndStreamName(categoryName, streamId) : Initialization.ContainerInitializerGuard * string = + let databaseId, containerId, streamName = categoryAndStreamNameToDatabaseContainerStream (categoryName, streamId) + let createContainerInitializerGuard (d, c) = + let init = + if Some true = disableInitialization then None + else Some (fun cosmosContainer -> Initialization.createSyncStoredProcIfNotExists None cosmosContainer |> Async.Ignore) + let primaryContainer = createContainer (d, c) + Initialization.ContainerInitializerGuard(createGateway primaryContainer, ?initContainer = init) + let g = containerInitGuards.GetOrAdd((databaseId, containerId), createContainerInitializerGuard) + g, streamName + +/// Defines a set of related access policies for a given CosmosDB, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) +type CosmosStoreContext(connection : CosmosStoreConnection, batchingPolicy, retryPolicy) = + new(connection : CosmosStoreConnection, ?defaultMaxItems, ?getDefaultMaxItems, ?maxRequests, ?readRetryPolicy, ?writeRetryPolicy) = + let retry = RetryPolicy(?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) + let batching = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems, ?maxRequests = maxRequests) + CosmosStoreContext(connection, batching, retry) + member __.Batching = batchingPolicy + member __.Retries = retryPolicy + member internal __.ResolveContainerClientAndStreamIdAndInit(categoryName, streamId) = + let cg, streamId = connection.ResolveContainerGuardAndStreamName(categoryName, streamId) + let store = StoreClient(cg.Container, batchingPolicy, retryPolicy) + store, streamId, cg.InitializationGate [] type CachingStrategy = @@ -1165,7 +1185,7 @@ type AccessStrategy<'event,'state> = /// | Custom of isOrigin: ('event -> bool) * transmute: ('event list -> 'state -> 'event list*'event list) -type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, caching, access) = +type CosmosStoreCategory<'event, 'state, 'context>(context : CosmosStoreContext, codec, fold, initial, caching, access) = let readCacheOption = match caching with | CachingStrategy.NoCaching -> None @@ -1178,15 +1198,18 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.MultiSnapshot (isOrigin, unfold) -> isOrigin, Choice2Of3 (fun _ state -> unfold state) | AccessStrategy.RollingState toSnapshot -> (fun _ -> true), Choice3Of3 (fun _ state -> [],[toSnapshot state]) | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute - let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) - let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, Container*string, 'context> = - match caching with - | 
CachingStrategy.NoCaching -> folder :> _ - | CachingStrategy.SlidingWindow(cache, window) -> - Caching.applyCacheUpdatesWithSlidingExpiration cache null window folder - - let resolveStream (streamId, maybeContainerInitializationGate) opt context = + let categories = System.Collections.Concurrent.ConcurrentDictionary>() + let resolveCategory (categoryName, container) = + let createCategory _name = + let cosmosCat = Category<'event, 'state, 'context>(container, codec) + let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) + match caching with + | CachingStrategy.NoCaching -> folder :> ICategory<_, _, string, 'context> + | CachingStrategy.SlidingWindow(cache, window) -> Caching.applyCacheUpdatesWithSlidingExpiration cache null window folder + categories.GetOrAdd(categoryName, createCategory) + + let resolveStream (categoryName, container, streamId, maybeContainerInitializationGate) opt context = + let category = resolveCategory (categoryName, container) { new IStream<'event, 'state> with member __.Load log = category.Load(log, streamId, opt) member __.TrySync(log: ILogger, token: StreamToken, originState: 'state, events: 'event list) = @@ -1196,71 +1219,62 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, do! init () return! category.TrySync(log, token, originState, events, context) } } - let resolveTarget = function - | StreamName.CategoryAndId (categoryName, streamId) -> context.ResolveContainerStream(categoryName, streamId) - - member __.Resolve(streamName : StreamName, []?option, []?context) = - match resolveTarget streamName, option with - | streamArgs,(None|Some AllowStale) -> resolveStream streamArgs option context - | (containerStream,maybeInit),Some AssumeEmpty -> - Stream.ofMemento (Token.create containerStream Position.fromKnownEmpty,initial) (resolveStream (containerStream,maybeInit) option context) - - member __.FromMemento(Token.Unpack (container,stream,_pos) as streamToken,state) = + let resolveStreamConfig = function + | StreamName.CategoryAndId (categoryName, streamId) -> + let containerClient, streamId, init = context.ResolveContainerClientAndStreamIdAndInit(categoryName, streamId) + categoryName, containerClient, streamId, init + + member __.Resolve + ( streamName : StreamName, + /// Resolver options + []?option, + /// Context to be passed to IEventCodec + []?context) = + match resolveStreamConfig streamName, option with + | streamArgs,(None|Some AllowStale) -> + resolveStream streamArgs option context + | (_, _, streamId, _) as streamArgs,Some AssumeEmpty -> + let stream = resolveStream streamArgs option context + Stream.ofMemento (Token.create streamId Position.fromKnownEmpty,initial) stream + + member __.FromMemento + ( Token.Unpack (stream,_pos) as streamToken, state) = let skipInitialization = None - Stream.ofMemento (streamToken,state) (resolveStream ((container,stream),skipInitialization) None None) + let (categoryName, container, streamId, _maybeInit) = resolveStreamConfig (StreamName.parse stream) + let stream = resolveStream (categoryName, container, streamId, skipInitialization) None None + Stream.ofMemento (streamToken,state) stream [] type Discovery = - | UriAndKey of databaseUri:Uri * key:string - /// Implements connection string parsing logic curiously missing from the CosmosDB SDK - static member FromConnectionString (connectionString: string) = - match connectionString with - | _ when String.IsNullOrWhiteSpace connectionString -> nullArg 
"connectionString" - | Regex.Match "^\s*AccountEndpoint\s*=\s*([^;\s]+)\s*;\s*AccountKey\s*=\s*([^;\s]+)\s*;?\s*$" m -> - let uri = m.Groups.[1].Value - let key = m.Groups.[2].Value - UriAndKey (Uri uri, key) - | _ -> invalidArg "connectionString" "unrecognized connection string format; must be `AccountEndpoint=https://...;AccountKey=...=;`" - -type Connector + /// Separated Account Uri and Key (for interop with previous versions) + | AccountUriAndKey of accountUri: Uri * key:string + /// Cosmos SDK Connection String + | ConnectionString of connectionString : string + +type CosmosStoreClientFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, - /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached + /// Maximum number of times to attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached maxRetryAttemptsOnRateLimitedRequests: int, /// Maximum number of seconds to wait (especially if a higher wait delay is suggested by CosmosDb in the 429 response) - // naming matches SDK ver >=3 maxRetryWaitTimeOnRateLimitedRequests: TimeSpan, - /// Log to emit connection messages to - log : ILogger, /// Connection limit for Gateway Mode (default 1000) []?gatewayModeMaxConnectionLimit, /// Connection mode (default: ConnectionMode.Gateway (lowest perf, least trouble)) []?mode : ConnectionMode, - /// consistency mode (default: ConsistencyLevel.Session) + /// consistency mode (default: ConsistencyLevel.Session) []?defaultConsistencyLevel : ConsistencyLevel, - - /// Retries for read requests, over and above those defined by the mandatory policies - []?readRetryPolicy, - /// Retries for write requests, over and above those defined by the mandatory policies - []?writeRetryPolicy, - /// Additional strings identifying the context of this connection; should provide enough context to disambiguate all potential connections to a cluster - /// NB as this will enter server and client logs, it should not contain sensitive information - []?tags : (string*string) seq, /// Inhibits certificate verification when set to true, i.e. 
for working with the CosmosDB Emulator (default false) []?bypassCertificateValidation : bool) = - do if log = null then nullArg "log" - - let logName (uri : Uri) name = - let name = String.concat ";" <| seq { - yield name - match tags with None -> () | Some tags -> for key, value in tags do yield sprintf "%s=%s" key value } - let sanitizedName = name.Replace('\'','_').Replace(':','_') // sic; Align with logging for ES Adapter - log.ForContext("uri", uri).Information("CosmosDb Connecting {connectionName}", sanitizedName) - /// ClientOptions for this Connector as configured - member val ClientOptions = + /// CosmosClientOptions for this Connector as configured + member val Options = let maxAttempts, maxWait, timeout = Nullable maxRetryAttemptsOnRateLimitedRequests, Nullable maxRetryWaitTimeOnRateLimitedRequests, requestTimeout - let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout) + let co = + CosmosClientOptions( + MaxRetryAttemptsOnRateLimitedRequests=maxAttempts, + MaxRetryWaitTimeOnRateLimitedRequests=maxWait, + RequestTimeout=timeout) match mode with | Some ConnectionMode.Direct -> co.ConnectionMode <- ConnectionMode.Direct | None | Some ConnectionMode.Gateway | Some _ (* enum total match :( *) -> co.ConnectionMode <- ConnectionMode.Gateway // default; only supports Https @@ -1277,30 +1291,13 @@ type Connector co.HttpClientFactory <- fun () -> new System.Net.Http.HttpClient(ch) co - /// Yields a CosmosClient configured and connected the requested `discovery` strategy - member __.CreateClient - ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs - name, discovery : Discovery, - /// true to inhibit logging of client name - []?skipLog) : CosmosClient = - let (Discovery.UriAndKey (databaseUri=uri; key=key)) = discovery - if skipLog <> Some true then logName uri name - new CosmosClient(string uri, key, __.ClientOptions) - - /// Yields a Connection configured per the specified strategy - /// NOTE this is still Async for backcompat, but initialization has been removed per https://github.com/Azure/azure-cosmos-dotnet-v3/issues/1436 - member __.Connect - ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs - name, discovery : Discovery, - /// true to inhibit logging of client name - []?skipLog) : Async = async { - let client = __.CreateClient(name, discovery, ?skipLog=skipLog) - return Connection(client, ?readRetryPolicy=readRetryPolicy, ?writeRetryPolicy=writeRetryPolicy) } - -namespace Equinox.Cosmos.Core - -open Equinox.Cosmos -open Equinox.Cosmos.Store + abstract member Create: discovery: Discovery -> CosmosClient + default __.Create discovery = discovery |> function + | Discovery.AccountUriAndKey (accountUri=uri; key=key) -> new CosmosClient(string uri, key, __.Options) + | Discovery.ConnectionString cs -> new CosmosClient(cs, __.Options) + +namespace Equinox.CosmosStore.Core + open FsCodec open FSharp.Control open System.Runtime.InteropServices @@ -1312,24 +1309,19 @@ type AppendResult<'t> = | Conflict of index: 't * conflictingEvents: ITimelineEvent[] | ConflictUnknown of index: 't -/// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. 
-type Context - ( /// Connection to CosmosDb, includes defined Transient Read and Write Retry policies - conn : Connection, - /// Container selector, mapping Stream Categories to Containers - containers : Containers, +/// Encapsulates the core facilities Equinox.CosmosStore offers for operating directly on Events in Streams. +type EventsContext internal + ( context : Equinox.CosmosStore.CosmosStoreContext, store : StoreClient, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events /// Defaults to 10 []?defaultMaxItems, - /// Alternate way of specifying defaultMaxItems which facilitates reading it from a cached dynamic configuration + /// Alternate way of specifying defaultMaxItems that facilitates reading it from a cached dynamic configuration []?getDefaultMaxItems) = do if log = null then nullArg "log" let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) - let gateway = Gateway(conn, batching) - let maxCountPredicate count = let acc = ref (max (count-1) 0) fun _ -> @@ -1338,16 +1330,22 @@ type Context false let yieldPositionAndData res = async { - let! (Token.Unpack (_,_,pos')), data = res + let! (Token.Unpack (_,pos')), data = res return pos', data } - member __.ResolveStream(streamName) = containers.Resolve(conn.Client, null, streamName, gateway.CreateSyncStoredProcIfNotExists (Some log)) - member __.CreateStream(streamName) = __.ResolveStream streamName |> fst + new (context : Equinox.CosmosStore.CosmosStoreContext, log, ?defaultMaxItems, ?getDefaultMaxItems) = + let storeClient, _streamId, _ = context.ResolveContainerClientAndStreamIdAndInit(null, null) + EventsContext(context, storeClient, log, ?defaultMaxItems=defaultMaxItems, ?getDefaultMaxItems=getDefaultMaxItems) + + member __.ResolveStream(streamName) = + let _cc, streamId, init = context.ResolveContainerClientAndStreamIdAndInit(null, streamName) + streamId, init + member __.StreamId(streamName) : string = __.ResolveStream streamName |> fst - member internal __.GetLazy((stream, startPos), ?batchSize, ?direction) : AsyncSeq[]> = + member internal __.GetLazy(stream, ?batchSize, ?direction, ?startPos) : AsyncSeq[]> = let direction = defaultArg direction Direction.Forward let batching = BatchingPolicy(defaultArg batchSize batching.MaxItems) - gateway.ReadLazy batching log stream direction startPos (Some,fun _ -> false) + store.ReadLazy(log, batching, stream, direction, startPos, (Some,fun _ -> false)) member internal __.GetInternal((stream, startPos), ?maxCount, ?direction) = async { let direction = defaultArg direction Direction.Forward @@ -1359,18 +1357,18 @@ type Context match maxCount with | Some limit -> maxCountPredicate limit | None -> fun _ -> false - return! gateway.Read log stream direction startPos (Some,isOrigin) } + return! store.Read(log, stream, direction, (Some,isOrigin), startPos) } /// Establishes the current position of the stream in as efficient a manner as possible /// (The ideal situation is that the preceding token is supplied as input in order to avail of 1RU low latency state checks) member __.Sync(stream, ?position: Position) : Async = async { - let! 
(Token.Unpack (_,_,pos')) = gateway.GetPosition(log, stream, ?pos=position) + let! (Token.Unpack (_,pos')) = store.GetPosition(log, stream, ?pos=position) return pos' } /// Reads in batches of `batchSize` from the specified `Position`, allowing the reader to efficiently walk away from a running query /// ... NB as long as they Dispose! member __.Walk(stream, batchSize, ?position, ?direction) : AsyncSeq[]> = - __.GetLazy((stream, position), batchSize, ?direction=direction) + __.GetLazy(stream, batchSize, ?direction=direction, ?startPos=position) /// Reads all Events from a `Position` in a given `direction` member __.Read(stream, ?position, ?maxCount, ?direction) : Async[]> = @@ -1378,7 +1376,7 @@ type Context /// Appends the supplied batch of events, subject to a consistency check based on the `position` /// Callers should implement appropriate idempotent handling, or use Equinox.Stream for that purpose - member __.Sync((container,stream), position, events: IEventData<_>[]) : Async> = async { + member __.Sync(stream, position, events: IEventData<_>[]) : Async> = async { // Writes go through the stored proc, which we need to provision per-collection // Having to do this here in this way is far from ideal, but work on caching, external snapshots and caching is likely // to move this about before we reach a final destination in any case @@ -1386,22 +1384,20 @@ type Context | None -> () | Some init -> do! init () let batch = Sync.mkBatch stream events Seq.empty - let! res = gateway.Sync log (container,stream) (Sync.Exp.Version position.index,batch) - match res with - | InternalSyncResult.Written (Token.Unpack (_,_,pos)) -> return AppendResult.Ok pos - | InternalSyncResult.Conflict (Token.Unpack (_,_,pos),events) -> return AppendResult.Conflict (pos, events) - | InternalSyncResult.ConflictUnknown (Token.Unpack (_,_,pos)) -> return AppendResult.ConflictUnknown pos } + match! store.Sync(log, stream, SyncExp.Version position.index, batch) with + | InternalSyncResult.Written (Token.Unpack (_,pos)) -> return AppendResult.Ok pos + | InternalSyncResult.Conflict (Token.Unpack (_,pos),events) -> return AppendResult.Conflict (pos, events) + | InternalSyncResult.ConflictUnknown (Token.Unpack (_,pos)) -> return AppendResult.ConflictUnknown pos } /// Low level, non-idempotent call appending events to a stream without a concurrency control mechanism in play /// NB Should be used sparingly; Equinox.Stream enables building equivalent equivalent idempotent handling with minimal code. member __.NonIdempotentAppend(stream, events: IEventData<_>[]) : Async = async { - let! res = __.Sync(stream, Position.fromAppendAtEnd, events) - match res with + match! __.Sync(stream, Position.fromAppendAtEnd, events) with | AppendResult.Ok token -> return token | x -> return x |> sprintf "Conflict despite it being disabled %A" |> invalidOp } - member __.Prune((container,stream), beforeIndex) : Async = - gateway.Prune(log, (container,stream), beforeIndex) + member __.Prune(stream, beforeIndex) : Async = + store.Prune(log, stream, beforeIndex) /// Provides mechanisms for building `EventData` records to be supplied to the `Events` API type EventData() = @@ -1413,8 +1409,7 @@ type EventData() = module Events = let private (|PositionIndex|) (x: Position) = x.index let private stripSyncResult (f: Async>): Async> = async { - let! res = f - match res with + match! 
f with | AppendResult.Ok (PositionIndex index)-> return AppendResult.Ok index | AppendResult.Conflict (PositionIndex index,events) -> return AppendResult.Conflict (index, events) | AppendResult.ConflictUnknown (PositionIndex index) -> return AppendResult.ConflictUnknown index } @@ -1435,49 +1430,49 @@ module Events = /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let getAll (ctx: Context) (streamName: string) (MinPosition index: int64) (batchSize: int): FSharp.Control.AsyncSeq[]> = - ctx.Walk(ctx.CreateStream streamName, batchSize, ?position=index) + let getAll (ctx: EventsContext) (streamName: string) (MinPosition index: int64) (batchSize: int): AsyncSeq[]> = + ctx.Walk(ctx.StreamId streamName, batchSize, ?position=index) /// Returns an async array of events in the stream starting at the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let get (ctx: Context) (streamName: string) (MinPosition index: int64) (maxCount: int): Async[]> = - ctx.Read(ctx.CreateStream streamName, ?position=index, maxCount=maxCount) |> dropPosition + let get (ctx: EventsContext) (streamName: string) (MinPosition index: int64) (maxCount: int): Async[]> = + ctx.Read(ctx.StreamId streamName, ?position=index, maxCount=maxCount) |> dropPosition /// Appends a batch of events to a stream at the specified expected sequence number. /// If the specified expected sequence number does not match the stream, the events are not appended /// and a failure is returned. - let append (ctx: Context) (streamName: string) (index: int64) (events: IEventData<_>[]): Async> = - ctx.Sync(ctx.CreateStream streamName, Position.fromI index, events) |> stripSyncResult + let append (ctx: EventsContext) (streamName: string) (index: int64) (events: IEventData<_>[]): Async> = + ctx.Sync(ctx.StreamId streamName, Position.fromI index, events) |> stripSyncResult /// Appends a batch of events to a stream at the the present Position without any conflict checks. 
/// NB typically, it is recommended to ensure idempotency of operations by using the `append` and related API as /// this facilitates ensuring consistency is maintained, and yields reduced latency and Request Charges impacts /// (See equivalent APIs on `Context` that yield `Position` values) - let appendAtEnd (ctx: Context) (streamName: string) (events: IEventData<_>[]): Async = - ctx.NonIdempotentAppend(ctx.CreateStream streamName, events) |> stripPosition + let appendAtEnd (ctx: EventsContext) (streamName: string) (events: IEventData<_>[]): Async = + ctx.NonIdempotentAppend(ctx.StreamId streamName, events) |> stripPosition /// Requests deletion of events prior to the specified Index /// Due to the need to preserve ordering of data in the stream, only full batches will be removed /// Returns count of events deleted this time, events that could not be deleted due to partial batches, and the stream's lowest remaining sequence number - let prune (ctx: Context) (streamName: string) (beforeIndex: int64): Async = - ctx.Prune(ctx.CreateStream streamName, beforeIndex) + let prune (ctx: EventsContext) (streamName: string) (beforeIndex: int64): Async = + ctx.Prune(ctx.StreamId streamName, beforeIndex) /// Returns an async sequence of events in the stream backwards starting from the specified sequence number, /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. - let getAllBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (batchSize: int): AsyncSeq[]> = - ctx.Walk(ctx.CreateStream streamName, batchSize, ?position=index, direction=Direction.Backward) + let getAllBackwards (ctx: EventsContext) (streamName: string) (MaxPosition index: int64) (batchSize: int): AsyncSeq[]> = + ctx.Walk(ctx.StreamId streamName, batchSize, ?position=index, direction=Direction.Backward) /// Returns an async array of events in the stream backwards starting from the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. 
- let getBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (maxCount: int): Async[]> = - ctx.Read(ctx.CreateStream streamName, ?position=index, maxCount=maxCount, direction=Direction.Backward) |> dropPosition + let getBackwards (ctx: EventsContext) (streamName: string) (MaxPosition index: int64) (maxCount: int): Async[]> = + ctx.Read(ctx.StreamId streamName, ?position=index, maxCount=maxCount, direction=Direction.Backward) |> dropPosition /// Obtains the `index` from the current write Position - let getNextIndex (ctx: Context) (streamName: string) : Async = - ctx.Sync(ctx.CreateStream streamName) |> stripPosition + let getNextIndex (ctx: EventsContext) (streamName: string) : Async = + ctx.Sync(ctx.StreamId streamName) |> stripPosition diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj similarity index 89% rename from src/Equinox.Cosmos/Equinox.Cosmos.fsproj rename to src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj index 3037854d6..887423fea 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj @@ -1,7 +1,7 @@  - netstandard2.1 + netstandard2.1 5 false true @@ -11,7 +11,7 @@ - + @@ -23,7 +23,7 @@ - + diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs deleted file mode 100644 index 0240c7d04..000000000 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ /dev/null @@ -1,37 +0,0 @@ -[] -module Equinox.Cosmos.Integration.CosmosFixtures - -open Equinox.Cosmos -open System - -module Option = - let defaultValue def option = defaultArg option def - -/// Standing up an Equinox instance is necessary to run for test purposes; either: -/// - replace connection below with a connection string or Uri+Key for an initialized Equinox instance -/// - Create a local Equinox via dotnet run cli/Equinox.cli -s $env:EQUINOX_COSMOS_CONNECTION -d test -c $env:EQUINOX_COSMOS_CONTAINER provision -ru 10000 -let private connectToCosmos (log: Serilog.ILogger) name discovery = - Connector(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
- .Connect(name, discovery) -let private read env = Environment.GetEnvironmentVariable env |> Option.ofObj -let (|Default|) def name = (read name),def ||> defaultArg - -let connectToSpecifiedCosmosOrSimulator (log: Serilog.ILogger) = - match read "EQUINOX_COSMOS_CONNECTION" with - | None -> - Discovery.UriAndKey(Uri "https://localhost:8081", "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") - |> connectToCosmos log "localDocDbSim" - | Some connectionString -> - Discovery.FromConnectionString connectionString - |> connectToCosmos log "EQUINOX_COSMOS_CONNECTION" - -let defaultBatchSize = 500 - -let containers = - Containers( - read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test", - read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test") - -let createCosmosContext connection batchSize = - let gateway = Gateway(connection, BatchingPolicy(defaultMaxItems=batchSize)) - Context(gateway, containers) diff --git a/tests/Equinox.Cosmos.Integration/AsyncBatchingGateTests.fs b/tests/Equinox.CosmosStore.Integration/AsyncBatchingGateTests.fs similarity index 100% rename from tests/Equinox.Cosmos.Integration/AsyncBatchingGateTests.fs rename to tests/Equinox.CosmosStore.Integration/AsyncBatchingGateTests.fs diff --git a/tests/Equinox.Cosmos.Integration/CacheCellTests.fs b/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs similarity index 98% rename from tests/Equinox.Cosmos.Integration/CacheCellTests.fs rename to tests/Equinox.CosmosStore.Integration/CacheCellTests.fs index d9da6f4ae..406cbc7a8 100644 --- a/tests/Equinox.Cosmos.Integration/CacheCellTests.fs +++ b/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs @@ -1,4 +1,4 @@ -module Equinox.Cosmos.Integration.CacheCellTests +module Equinox.CosmosStore.Integration.CacheCellTests open Equinox.Core open Swensen.Unquote diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs similarity index 86% rename from tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs rename to tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs index dbc3d2b6d..b823ed34e 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs @@ -1,7 +1,7 @@ -module Equinox.Cosmos.Integration.CoreIntegration +module Equinox.CosmosStore.Integration.CoreIntegration -open Equinox.Cosmos.Core -open Equinox.Cosmos.Integration.Infrastructure +open Equinox.CosmosStore.Core +open Equinox.CosmosStore.Integration.Infrastructure open FsCodec open FSharp.Control open Newtonsoft.Json.Linq @@ -29,9 +29,9 @@ type Tests(testOutputHelper) = let (|TestStream|) (name: Guid) = incr testIterations sprintf "events-%O-%i" name !testIterations - let mkContextWithItemLimit conn defaultBatchSize = - Context(conn,containers,log,?defaultMaxItems=defaultBatchSize) - let mkContext conn = mkContextWithItemLimit conn None + let mkContextWithItemLimit log defaultBatchSize = + createPrimaryEventsContext log defaultBatchSize + let mkContext log = mkContextWithItemLimit log None let verifyRequestChargesMax rus = let tripRequestCharges = [ for e, c in capture.RequestCharges -> sprintf "%A" e, c ] @@ -39,14 +39,14 @@ type Tests(testOutputHelper) = [] let append (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContext conn + let ctx = mkContext log + capture.Clear() let index = 0L let! 
res = Events.append ctx streamName index <| TestEvents.Create(0,1) test <@ AppendResult.Ok 1L = res @> test <@ [EqxAct.Append] = capture.ExternalCalls @> - verifyRequestChargesMax 33 // 32.27 // WAS 10 + verifyRequestChargesMax 34 // 33.07 // WAS 10 // Clear the counters capture.Clear() @@ -54,15 +54,14 @@ type Tests(testOutputHelper) = test <@ AppendResult.Ok 6L = res @> test <@ [EqxAct.Append] = capture.ExternalCalls @> // We didnt request small batches or splitting so it's not dramatically more expensive to write N events - verifyRequestChargesMax 39 // 38.74 // was 11 + verifyRequestChargesMax 41 // 40.68 // was 11 } // It's conceivable that in the future we might allow zero-length batches as long as a sync mechanism leveraging the etags and unfolds update mechanisms // As it stands with the NoTipEvents stored proc, permitting empty batches a) yields an invalid state b) provides no conceivable benefit [] let ``append Throws when passed an empty batch`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContext conn + let ctx = mkContext log let index = 0L let! res = Events.append ctx streamName index (TestEvents.Create(0,0)) |> Async.Catch @@ -93,22 +92,22 @@ type Tests(testOutputHelper) = let verifyCorrectEventsEx direction baseIndex (expected: IEventData<_>[]) (xs: ITimelineEvent[]) = let xs, baseIndex = - if direction = Equinox.Cosmos.Store.Direction.Forward then xs, baseIndex + if direction = Equinox.CosmosStore.Core.Direction.Forward then xs, baseIndex else Array.rev xs, baseIndex - int64 (Array.length expected) + 1L test <@ [for i in 0..expected.Length - 1 -> baseIndex + int64 i] = [for r in xs -> r.Index] @> test <@ [for e in expected -> e.EventType] = [ for r in xs -> r.EventType ] @> for i,x,y in Seq.mapi2 (fun i x y -> i,x,y) [for e in expected -> e.Data] [for r in xs -> r.Data] do verifyUtf8JsonEquals i x y - let verifyCorrectEventsBackward = verifyCorrectEventsEx Equinox.Cosmos.Store.Direction.Backward - let verifyCorrectEvents = verifyCorrectEventsEx Equinox.Cosmos.Store.Direction.Forward + let verifyCorrectEventsBackward = verifyCorrectEventsEx Equinox.CosmosStore.Core.Direction.Backward + let verifyCorrectEvents = verifyCorrectEventsEx Equinox.CosmosStore.Core.Direction.Forward [] let ``appendAtEnd and getNextIndex`` (extras, TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) - // If a fail triggers a rerun, we need to dump the previous log entries captured capture.Clear() + + let ctx = mkContextWithItemLimit log (Some 1) + let! pos = Events.getNextIndex ctx streamName test <@ [EqxAct.TipNotFound] = capture.ExternalCalls @> 0L =! pos @@ -134,7 +133,7 @@ type Tests(testOutputHelper) = pos <- pos + 42L pos =! res test <@ [EqxAct.Append] = capture.ExternalCalls @> - verifyRequestChargesMax 46 // 45.42 // 47.02 // WAS 20 + verifyRequestChargesMax 47 // 46.52 // 47.02 // WAS 20 capture.Clear() let! res = Events.getNextIndex ctx streamName @@ -144,12 +143,12 @@ type Tests(testOutputHelper) = pos =! res // Demonstrate benefit/mechanism for using the Position-based API to avail of the etag tracking - let stream = ctx.CreateStream streamName + let stream = ctx.StreamId streamName let extrasCount = match extras with x when x > 50 -> 5000 | x when x < 1 -> 1 | x -> x*100 let! 
_pos = ctx.NonIdempotentAppend(stream, TestEvents.Create (int pos,extrasCount)) test <@ [EqxAct.Append] = capture.ExternalCalls @> - verifyRequestChargesMax 149 // 148.11 // 463.01 observed + verifyRequestChargesMax 442 // 441.88 // 463.01 observed capture.Clear() let! pos = ctx.Sync(stream,?position=None) @@ -160,14 +159,12 @@ type Tests(testOutputHelper) = let! _pos = ctx.Sync(stream,pos) test <@ [EqxAct.TipNotModified] = capture.ExternalCalls @> verifyRequestChargesMax 1 // for a 302 by definition - when an etag IfNotMatch is honored, you only pay one RU - capture.Clear() } [] let ``append - fails on non-matching`` (TestStream streamName) = Async.RunSynchronously <| async { capture.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContext conn + let ctx = mkContext log // Attempt to write, skipping Index 0 let! res = Events.append ctx streamName 1L <| TestEvents.Create(0,1) @@ -182,7 +179,7 @@ type Tests(testOutputHelper) = let! res = Events.append ctx streamName 0L expected test <@ AppendResult.Ok 1L = res @> test <@ [EqxAct.Append] = capture.ExternalCalls @> - verifyRequestChargesMax 33 // 32.05 WAS 11 // 10.33 + verifyRequestChargesMax 36 // 35.78 WAS 11 // 10.33 capture.Clear() // Try overwriting it (a competing consumer would see the same) @@ -209,8 +206,7 @@ type Tests(testOutputHelper) = [] let get (TestStream streamName) = Async.RunSynchronously <| async { capture.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 3) + let ctx = mkContextWithItemLimit log (Some 3) // We're going to ignore the first, to prove we can let! expected = add6EventsIn2Batches ctx streamName @@ -226,8 +222,8 @@ type Tests(testOutputHelper) = [] let ``get in 2 batches`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + capture.Clear() + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName let expected = expected |> Array.take 3 @@ -242,11 +238,9 @@ type Tests(testOutputHelper) = [] let ``get Lazy`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName - capture.Clear() let! res = Events.getAll ctx streamName 0L 1 |> AsyncSeq.concatSeq |> AsyncSeq.takeWhileInclusive (fun _ -> false) |> AsyncSeq.toArrayAsync let expected = expected |> Array.take 1 @@ -254,7 +248,7 @@ type Tests(testOutputHelper) = verifyCorrectEvents 0L expected res test <@ [EqxAct.ResponseForward; EqxAct.QueryForward] = capture.ExternalCalls @> let queryRoundTripsAndItemCounts = function - | EqxEvent (Equinox.Cosmos.Store.Log.Event.Query (Equinox.Cosmos.Store.Direction.Forward, responses, { count = c })) -> Some (responses,c) + | EqxEvent (Equinox.CosmosStore.Core.Log.Event.Query (Equinox.CosmosStore.Core.Direction.Forward, responses, { count = c })) -> Some (responses,c) | _ -> None // validate that, despite only requesting max 1 item, we only needed one trip (which contained only one item) [1,1] =! capture.ChooseCalls queryRoundTripsAndItemCounts @@ -265,9 +259,7 @@ type Tests(testOutputHelper) = [] let getBackwards (TestStream streamName) = Async.RunSynchronously <| async { - capture.Clear() - let! 
conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName @@ -284,8 +276,7 @@ type Tests(testOutputHelper) = [] let ``getBackwards in 2 batches`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName @@ -302,11 +293,9 @@ type Tests(testOutputHelper) = [] let ``getBackwards Lazy`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName - capture.Clear() let! res = Events.getAllBackwards ctx streamName 10L 1 @@ -320,23 +309,21 @@ type Tests(testOutputHelper) = test <@ [EqxAct.ResponseBackward; EqxAct.QueryBackward] = capture.ExternalCalls @> // validate that, despite only requesting max 1 item, we only needed one trip, bearing 5 items (from which one item was omitted) let queryRoundTripsAndItemCounts = function - | EqxEvent (Equinox.Cosmos.Store.Log.Event.Query (Equinox.Cosmos.Store.Direction.Backward, responses, { count = c })) -> Some (responses,c) + | EqxEvent (Equinox.CosmosStore.Core.Log.Event.Query (Equinox.CosmosStore.Core.Direction.Backward, responses, { count = c })) -> Some (responses,c) | _ -> None [1,5] =! capture.ChooseCalls queryRoundTripsAndItemCounts verifyRequestChargesMax 4 // 3.24 // WAS 3 // 2.98 } (* Prune *) + [] let prune (TestStream streamName) = Async.RunSynchronously <| async { - capture.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn None + let ctx = mkContextWithItemLimit log None let! expected = add6EventsIn2Batches ctx streamName // Trigger deletion of first batch - capture.Clear() let! deleted, deferred, trimmedPos = Events.prune ctx streamName 5L test <@ deleted = 1 && deferred = 4 && trimmedPos = 1L @> test <@ [EqxAct.PruneResponse; EqxAct.Delete; EqxAct.Prune] = capture.ExternalCalls @> diff --git a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs new file mode 100644 index 000000000..99ea9e1cb --- /dev/null +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs @@ -0,0 +1,44 @@ +[] +module Equinox.CosmosStore.Integration.CosmosFixtures + +open Equinox.CosmosStore +open System + +module Option = + let defaultValue def option = defaultArg option def + +/// Standing up an Equinox instance is necessary to run for test purposes; either: +/// - replace connection below with a connection string or Uri+Key for an initialized Equinox instance +/// - Create a local Equinox via (e.g.) 
dotnet run cli/Equinox.Tool init -ru 1000 cosmos -s $env:EQUINOX_COSMOS_CONNECTION -d test -c $env:EQUINOX_COSMOS_CONTAINER +let private tryRead env = Environment.GetEnvironmentVariable env |> Option.ofObj +let (|Default|) def name = (tryRead name),def ||> defaultArg + +let private databaseId = tryRead "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" +let private containerId = tryRead "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" + +let discoverConnection () = + match tryRead "EQUINOX_COSMOS_CONNECTION" with + | None -> "localDocDbSim", Discovery.AccountUriAndKey(Uri "https://localhost:8081", "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") + | Some connectionString -> "EQUINOX_COSMOS_CONNECTION", Discovery.ConnectionString connectionString + +let createClient (log : Serilog.ILogger) name discovery = + let factory = CosmosStoreClientFactory(requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) + let client = factory.Create discovery + log.Information("CosmosDb Connecting {name} to {endpoint}", name, client.Endpoint) + client + +let connectPrimary (log : Serilog.ILogger) = + let name, discovery = discoverConnection () + let client = createClient log name discovery + CosmosStoreConnection(client, databaseId, containerId) + +let createPrimaryContext (log: Serilog.ILogger) batchSize = + let conn = connectPrimary log + CosmosStoreContext(conn, defaultMaxItems = batchSize) + +let defaultBatchSize = 500 + +let createPrimaryEventsContext log batchSize = + let batchSize = defaultArg batchSize defaultBatchSize + let ctx = createPrimaryContext log batchSize + Equinox.CosmosStore.Core.EventsContext(ctx, log, defaultMaxItems = batchSize) diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixturesInfrastructure.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixturesInfrastructure.fs similarity index 94% rename from tests/Equinox.Cosmos.Integration/CosmosFixturesInfrastructure.fs rename to tests/Equinox.CosmosStore.Integration/CosmosFixturesInfrastructure.fs index 42e4b4e50..5efe35ed9 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixturesInfrastructure.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixturesInfrastructure.fs @@ -1,5 +1,5 @@ [] -module Equinox.Cosmos.Integration.Infrastructure +module Equinox.CosmosStore.Integration.Infrastructure open Domain open FsCheck @@ -49,8 +49,8 @@ module SerilogHelpers = let (|SerilogScalar|_|) : Serilog.Events.LogEventPropertyValue -> obj option = function | (:? 
ScalarValue as x) -> Some x.Value | _ -> None - open Equinox.Cosmos.Store - open Equinox.Cosmos.Store.Log + open Equinox.CosmosStore.Core + open Equinox.CosmosStore.Core.Log [] type EqxAct = | Tip | TipNotFound | TipNotModified @@ -72,7 +72,7 @@ module SerilogHelpers = | Event.PruneResponse _ -> EqxAct.PruneResponse | Event.Delete _ -> EqxAct.Delete | Event.Prune _ -> EqxAct.Prune - let inline (|Stats|) ({ ru = ru }: Equinox.Cosmos.Store.Log.Measurement) = ru + let inline (|Stats|) ({ ru = ru }: Equinox.CosmosStore.Core.Log.Measurement) = ru let (|CosmosReadRc|CosmosWriteRc|CosmosResyncRc|CosmosResponseRc|CosmosDeleteRc|CosmosPruneRc|) = function | Event.Tip (Stats s) | Event.TipNotFound (Stats s) @@ -92,9 +92,9 @@ module SerilogHelpers = EquinoxChargeRollup | CosmosReadRc rc | CosmosWriteRc rc | CosmosResyncRc rc | CosmosDeleteRc rc | CosmosPruneRc rc as e -> CosmosRequestCharge (e,rc) - let (|EqxEvent|_|) (logEvent : LogEvent) : Equinox.Cosmos.Store.Log.Event option = + let (|EqxEvent|_|) (logEvent : LogEvent) : Equinox.CosmosStore.Core.Log.Event option = logEvent.Properties.Values |> Seq.tryPick (function - | SerilogScalar (:? Equinox.Cosmos.Store.Log.Event as e) -> Some e + | SerilogScalar (:? Equinox.CosmosStore.Core.Log.Event as e) -> Some e | _ -> None) let (|HasProp|_|) (name : string) (e : LogEvent) : LogEventPropertyValue option = diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosIntegration.fs similarity index 81% rename from tests/Equinox.Cosmos.Integration/CosmosIntegration.fs rename to tests/Equinox.CosmosStore.Integration/CosmosIntegration.fs index c94f53ce8..b9b53b91e 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosIntegration.fs @@ -1,8 +1,8 @@ -module Equinox.Cosmos.Integration.CosmosIntegration +module Equinox.CosmosStore.Integration.CosmosIntegration open Domain -open Equinox.Cosmos -open Equinox.Cosmos.Integration.Infrastructure +open Equinox.CosmosStore +open Equinox.CosmosStore.Integration.Infrastructure open FSharp.UMX open Swensen.Unquote open System @@ -12,45 +12,40 @@ module Cart = let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot let codec = Domain.Cart.Events.codec - let createServiceWithoutOptimization connection batchSize log = - let store = createCosmosContext connection batchSize - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) + let createServiceWithoutOptimization log store = + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) Backend.Cart.create log resolve let projection = "Compacted",snd snapshot /// Trigger looking in Tip (we want those calls to occur, but without leaning on snapshots, which would reduce the paths covered) - let createServiceWithEmptyUnfolds connection batchSize log = - let store = createCosmosContext connection batchSize + let createServiceWithEmptyUnfolds store log = let unfArgs = Domain.Cart.Fold.isOrigin, fun _ -> Seq.empty - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.MultiSnapshot unfArgs).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.MultiSnapshot 
unfArgs).Resolve(id,?option=opt) Backend.Cart.create log resolve - let createServiceWithSnapshotStrategy connection batchSize log = - let store = createCosmosContext connection batchSize - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) + let createServiceWithSnapshotStrategy store log = + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) Backend.Cart.create log resolve - let createServiceWithSnapshotStrategyAndCaching connection batchSize log cache = - let store = createCosmosContext connection batchSize + let createServiceWithSnapshotStrategyAndCaching store log cache = let sliding20m = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) - let resolve (id,opt) = Resolver(store, codec, fold, initial, sliding20m, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, sliding20m, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) Backend.Cart.create log resolve - let createServiceWithRollingState connection log = - let store = createCosmosContext connection 1 + let createServiceWithRollingState store log = let access = AccessStrategy.RollingState Domain.Cart.Fold.snapshot - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, access).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, access).Resolve(id,?option=opt) Backend.Cart.create log resolve module ContactPreferences = let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial let codec = Domain.ContactPreferences.Events.codec - let createServiceWithoutOptimization createGateway defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = - let gateway = createGateway defaultBatchSize - let resolver = Resolver(gateway, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized) - Backend.ContactPreferences.create log resolver.Resolve - let createService log createGateway = - let resolver = Resolver(createGateway 1, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.LatestKnownEvent) - Backend.ContactPreferences.create log resolver.Resolve - let createServiceWithLatestKnownEvent createGateway log cachingStrategy = - let resolver = Resolver(createGateway 1, codec, fold, initial, cachingStrategy, AccessStrategy.LatestKnownEvent) - Backend.ContactPreferences.create log resolver.Resolve + let createServiceWithoutOptimization createContext defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = + let context = createContext defaultBatchSize + let resolve = CosmosStoreCategory(context, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve + Backend.ContactPreferences.create log resolve + let createService log store = + let resolve = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.LatestKnownEvent).Resolve + Backend.ContactPreferences.create log resolve + let createServiceWithLatestKnownEvent store log cachingStrategy = + let resolve = CosmosStoreCategory(store, codec, fold, initial, cachingStrategy, AccessStrategy.LatestKnownEvent).Resolve + Backend.ContactPreferences.create log resolve #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -75,12 +70,12 
@@ type Tests(testOutputHelper) = let tripRequestCharges = [ for e, c in capture.RequestCharges -> sprintf "%A" e, c ] test <@ float rus >= Seq.sum (Seq.map snd tripRequestCharges) @> - [] + [] let ``Can roundtrip against Cosmos, correctly batching the reads [without reading the Tip]`` context skuId = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let maxItemsPerRequest = 5 - let service = Cart.createServiceWithoutOptimization conn maxItemsPerRequest log + let store = createPrimaryContext log maxItemsPerRequest + + let service = Cart.createServiceWithoutOptimization log store capture.Clear() // for re-runs of the test let cartId = % Guid.NewGuid() @@ -103,22 +98,22 @@ type Tests(testOutputHelper) = let expectedResponses = transactions/maxItemsPerRequest + 1 test <@ List.replicate expectedResponses EqxAct.ResponseBackward @ [EqxAct.QueryBackward] = capture.ExternalCalls @> - verifyRequestChargesMax 8 // 7.74 // 10.01 + verifyRequestChargesMax 9 // 8.58 // 10.01 } - [] + [] let ``Can roundtrip against Cosmos, managing sync conflicts by retrying`` ctx initialState = Async.RunSynchronously <| async { let log1, capture1 = log, capture capture1.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log1 - // Ensure batching is included at some point in the proceedings let batchSize = 3 + let store = createPrimaryContext log1 batchSize + // Ensure batching is included at some point in the proceedings let context, (sku11, sku12, sku21, sku22) = ctx let cartId = % Guid.NewGuid() // establish base stream state - let service1 = Cart.createServiceWithEmptyUnfolds conn batchSize log1 + let service1 = Cart.createServiceWithEmptyUnfolds store log1 let! maybeInitialSku = let (streamEmpty, skuId) = initialState async { @@ -151,7 +146,7 @@ type Tests(testOutputHelper) = do! s4 } let log2, capture2 = TestsWithLogCapture.CreateLoggerWithCapture testOutputHelper use _flush = log2 - let service2 = Cart.createServiceWithEmptyUnfolds conn batchSize log2 + let service2 = Cart.createServiceWithEmptyUnfolds store log2 let t2 = async { // Signal we have state, wait for other to do same, engineer conflict let prepare = async { @@ -192,10 +187,10 @@ type Tests(testOutputHelper) = let singleBatchBackwards = [EqxAct.ResponseBackward; EqxAct.QueryBackward] let batchBackwardsAndAppend = singleBatchBackwards @ [EqxAct.Append] - [] + [] let ``Can correctly read and update against Cosmos with LatestKnownEvent Access Strategy`` value = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let service = ContactPreferences.createService log (createCosmosContext conn) + let store = createPrimaryContext log 1 + let service = ContactPreferences.createService log store let id = ContactPreferences.Id (let g = System.Guid.NewGuid() in g.ToString "N") //let (Domain.ContactPreferences.Id email) = id () @@ -215,10 +210,10 @@ type Tests(testOutputHelper) = test <@ [EqxAct.Tip; EqxAct.Append; EqxAct.Tip] = capture.ExternalCalls @> } - [] + [] let ``Can correctly read and update Contacts against Cosmos with RollingUnfolds Access Strategy`` value = Async.RunSynchronously <| async { - let! 
conn = connectToSpecifiedCosmosOrSimulator log - let service = ContactPreferences.createServiceWithLatestKnownEvent (createCosmosContext conn) log CachingStrategy.NoCaching + let store = createPrimaryContext log 1 + let service = ContactPreferences.createServiceWithLatestKnownEvent store log CachingStrategy.NoCaching let id = ContactPreferences.Id (let g = System.Guid.NewGuid() in g.ToString "N") // Feed some junk into the stream @@ -241,13 +236,13 @@ type Tests(testOutputHelper) = let ``Can roundtrip Cart against Cosmos with RollingUnfolds, detecting conflicts based on _etag`` ctx initialState = Async.RunSynchronously <| async { let log1, capture1 = log, capture capture1.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log1 + let store = createPrimaryContext log1 1 let context, (sku11, sku12, sku21, sku22) = ctx let cartId = % Guid.NewGuid() // establish base stream state - let service1 = Cart.createServiceWithRollingState conn log1 + let service1 = Cart.createServiceWithRollingState store log1 let! maybeInitialSku = let (streamEmpty, skuId) = initialState async { @@ -280,7 +275,7 @@ type Tests(testOutputHelper) = do! s4 } let log2, capture2 = TestsWithLogCapture.CreateLoggerWithCapture testOutputHelper use _flush = log2 - let service2 = Cart.createServiceWithRollingState conn log2 + let service2 = Cart.createServiceWithRollingState store log2 let t2 = async { // Signal we have state, wait for other to do same, engineer conflict let prepare = async { @@ -312,11 +307,11 @@ type Tests(testOutputHelper) = && [EqxAct.Resync] = c2 @> } - [] + [] let ``Can roundtrip against Cosmos, using Snapshotting to avoid queries`` context skuId = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log let batchSize = 10 - let createServiceIndexed () = Cart.createServiceWithSnapshotStrategy conn batchSize log + let store = createPrimaryContext log batchSize + let createServiceIndexed () = Cart.createServiceWithSnapshotStrategy store log let service1, service2 = createServiceIndexed (), createServiceIndexed () capture.Clear() @@ -341,10 +336,10 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against Cosmos, correctly using Snapshotting and Cache to avoid redundant reads`` context skuId = Async.RunSynchronously <| async { - let! 
conn = connectToSpecifiedCosmosOrSimulator log let batchSize = 10 + let store = createPrimaryContext log batchSize let cache = Equinox.Cache("cart", sizeMb = 50) - let createServiceCached () = Cart.createServiceWithSnapshotStrategyAndCaching conn batchSize log cache + let createServiceCached () = Cart.createServiceWithSnapshotStrategyAndCaching store log cache let service1, service2 = createServiceCached (), createServiceCached () capture.Clear() diff --git a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj similarity index 77% rename from tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj rename to tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj index 90ec9bbab..b9d8ae35a 100644 --- a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj +++ b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj @@ -1,7 +1,7 @@  - netcoreapp3.1 + netcoreapp3.1 false 5 true @@ -20,17 +20,16 @@ - + - - + + - diff --git a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs similarity index 73% rename from tests/Equinox.Cosmos.Integration/JsonConverterTests.fs rename to tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs index fc283b40c..67f5d2861 100644 --- a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs +++ b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs @@ -1,6 +1,6 @@ -module Equinox.Cosmos.Integration.JsonConverterTests +module Equinox.CosmosStore.Integration.JsonConverterTests -open Equinox.Cosmos +open Equinox.CosmosStore open FsCheck.Xunit open Newtonsoft.Json open Swensen.Unquote @@ -21,7 +21,7 @@ type Base64ZipUtf8Tests() = [] let ``serializes, achieving compression`` () = let encoded = eventCodec.Encode(None,A { embed = String('x',5000) }) - let e : Store.Unfold = + let e : Core.Unfold = { i = 42L c = encoded.EventType d = encoded.Data @@ -32,14 +32,8 @@ type Base64ZipUtf8Tests() = [] let roundtrips value = - let hasNulls = - match value with - | A x | B x when obj.ReferenceEquals(null, x) -> true - | A { embed = x } | B { embed = x } -> obj.ReferenceEquals(null, x) - if hasNulls then () else - let encoded = eventCodec.Encode(None,value) - let e : Store.Unfold = + let e : Core.Unfold = { i = 42L c = encoded.EventType d = encoded.Data @@ -47,7 +41,8 @@ type Base64ZipUtf8Tests() = t = DateTimeOffset.MinValue } let ser = JsonConvert.SerializeObject(e) test <@ ser.Contains("\"d\":\"") @> - let des = JsonConvert.DeserializeObject(ser) + System.Diagnostics.Trace.WriteLine ser + let des = JsonConvert.DeserializeObject(ser) let d = FsCodec.Core.TimelineEvent.Create(-1L, des.c, des.d) let decoded = eventCodec.TryDecode d |> Option.get - test <@ value = decoded @> \ No newline at end of file + test <@ value = decoded @> diff --git a/tools/Equinox.Tool/Equinox.Tool.fsproj b/tools/Equinox.Tool/Equinox.Tool.fsproj index 517b966f0..63413fad2 100644 --- a/tools/Equinox.Tool/Equinox.Tool.fsproj +++ b/tools/Equinox.Tool/Equinox.Tool.fsproj @@ -38,6 +38,9 @@ + + + diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index ee0e1e7e4..7365194cb 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -105,7 +105,7 @@ and DumpInfo(args: ParseResults) = match args.TryGetSubCommand() with | Some (DumpArguments.Cosmos sargs) -> let storeLog = createStoreLog <| sargs.Contains 
-            storeLog, Storage.Cosmos.config (log,storeLog) storeConfig (Storage.Cosmos.Info sargs)
+            storeLog, Storage.Cosmos.config log storeConfig (Storage.Cosmos.Info sargs)
         | Some (DumpArguments.Es sargs) ->
             let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Arguments.VerboseStore
             storeLog, Storage.EventStore.config (log,storeLog) storeConfig sargs
@@ -179,7 +179,7 @@ and TestInfo(args: ParseResults) =
         | Some (Cosmos sargs) ->
             let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Arguments.VerboseStore
             log.Information("Running transactions in-process against CosmosDb with storage options: {options:l}", __.Options)
-            storeLog, Storage.Cosmos.config (log,storeLog) (cache, __.Unfolds, __.BatchSize) (Storage.Cosmos.Info sargs)
+            storeLog, Storage.Cosmos.config log (cache, __.Unfolds, __.BatchSize) (Storage.Cosmos.Info sargs)
         | Some (Es sargs) ->
             let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Arguments.VerboseStore
             log.Information("Running transactions in-process against EventStore with storage options: {options:l}", __.Options)
@@ -209,7 +209,7 @@ and Test = Favorite | SaveForLater | Todo
 let createStoreLog verbose verboseConsole maybeSeqEndpoint =
     let c = LoggerConfiguration().Destructure.FSharpTypes()
     let c = if verbose then c.MinimumLevel.Debug() else c
-    let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink())
+    let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink())
     let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink())
     let c = c.WriteTo.Sink(Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink())
     let level =
@@ -223,6 +223,7 @@ let createStoreLog verbose verboseConsole maybeSeqEndpoint =
     c.CreateLogger() :> ILogger
 
 module LoadTest =
+    open Equinox.Tools.TestHarness
 
     let private runLoadTest log testsPerSecond duration errorCutoff reportingIntervals (clients : ClientId[]) runSingleTest =
@@ -273,7 +274,7 @@ module LoadTest =
             .Information("Running {test} for {duration} @ {tps} hits/s across {clients} clients; Max errors: {errorCutOff}, reporting intervals: {ri}, report file: {report}",
                 test, a.Duration, a.TestsPerSecond, clients.Length, a.ErrorCutoff, a.ReportingIntervals, reportFilename)
         // Reset the start time based on which the shared global metrics will be computed
-        let _ = Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink.Restart()
+        let _ = Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink.Restart()
         let _ = Equinox.EventStore.Log.InternalMetrics.Stats.LogSink.Restart()
         let _ = Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink.Restart()
         let results = runLoadTest log a.TestsPerSecond (duration.Add(TimeSpan.FromSeconds 5.)) a.ErrorCutoff a.ReportingIntervals clients runSingleTest |> Async.RunSynchronously
@@ -285,7 +286,7 @@ module LoadTest =
 
         match storeConfig with
         | Some (Storage.StorageConfig.Cosmos _) ->
-            Equinox.Cosmos.Store.Log.InternalMetrics.dump log
+            Equinox.CosmosStore.Core.Log.InternalMetrics.dump log
         | Some (Storage.StorageConfig.Es _) ->
             Equinox.EventStore.Log.InternalMetrics.dump log
         | Some (Storage.StorageConfig.Sql _) ->
@@ -295,7 +296,7 @@ let createDomainLog verbose verboseConsole maybeSeqEndpoint =
     let c = LoggerConfiguration().Destructure.FSharpTypes().Enrich.FromLogContext()
     let c = if verbose then c.MinimumLevel.Debug() else c
-    let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink())
+    let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink())
     let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink())
     let c = c.WriteTo.Sink(Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink())
     let outputTemplate = "{Timestamp:T} {Level:u1} {Message:l} {Properties}{NewLine}{Exception}"
@@ -304,25 +305,25 @@ let createDomainLog verbose verboseConsole maybeSeqEndpoint =
     c.CreateLogger()
 
 module CosmosInit =
-    open Equinox.Cosmos.Store.Sync.Initialization
-    let conn (log,verboseConsole,maybeSeq) (sargs : ParseResults) = async {
-        let storeLog = createStoreLog (sargs.Contains Storage.Cosmos.Arguments.VerboseStore) verboseConsole maybeSeq
-        let discovery, dName, cName, connector = Storage.Cosmos.connection (log,storeLog) (Storage.Cosmos.Info sargs)
-        let! conn = connector.Connect(appName, discovery)
-        return storeLog, conn, dName, cName }
-    let containerAndOrDb (log: ILogger, verboseConsole, maybeSeq) (iargs: ParseResults) = async {
+    open Equinox.CosmosStore.Core.Initialization
+
+    let conn log (sargs : ParseResults) =
+        Storage.Cosmos.conn log (Storage.Cosmos.Info sargs)
+
+    let containerAndOrDb log (iargs: ParseResults) = async {
         match iargs.TryGetSubCommand() with
         | Some (InitArguments.Cosmos sargs) ->
             let rus, skipStoredProc = iargs.GetResult(InitArguments.Rus), iargs.Contains InitArguments.SkipStoredProc
             let mode = if iargs.Contains InitArguments.Shared then Provisioning.Database rus else Provisioning.Container rus
             let modeStr, rus = match mode with Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus
-            let! _storeLog,conn,dName,cName = conn (log,verboseConsole,maybeSeq) sargs
-            log.Information("Provisioning `Equinox.Cosmos` Store at {mode:l} level for {rus:n0} RU/s", modeStr, rus)
-            return! init log conn.Client (dName,cName) mode skipStoredProc
+            let client,dName,cName = conn log sargs
+            log.Information("Provisioning `Equinox.CosmosStore` Store at {mode:l} level for {rus:n0} RU/s", modeStr, rus)
+            return! init log client (dName,cName) mode skipStoredProc
         | _ -> failwith "please specify a `cosmos` endpoint" }
 
 module SqlInit =
+
     let databaseOrSchema (log: ILogger) (iargs: ParseResults) = async {
         match iargs.TryGetSubCommand() with
         | Some (ConfigArguments.MsSql sargs) ->
@@ -337,6 +338,7 @@ module SqlInit =
         | _ -> failwith "please specify a `ms`,`my` or `pg` endpoint" }
 
 module CosmosStats =
+
     type Microsoft.Azure.Cosmos.Container with // NB DO NOT CONSIDER PROMULGATING THIS HACK
         member container.QueryValue<'T>(sqlQuery : string) =
@@ -348,8 +350,8 @@ module CosmosStats =
         let doS,doD,doE = args.Contains StatsArguments.Streams, args.Contains StatsArguments.Documents, args.Contains StatsArguments.Events
         let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified
         let inParallel = args.Contains Parallel
-        let! _storeLog,conn,dName,cName = CosmosInit.conn (log,verboseConsole,maybeSeq) sargs
-        let container = conn.Client.GetContainer(dName, cName)
+        let client,dName,cName = CosmosInit.conn log sargs
+        let container = client.GetContainer(dName, cName)
         let ops =
             [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """
               if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c"""
@@ -365,6 +367,7 @@ module CosmosStats =
         | _ -> failwith "please specify a `cosmos` endpoint" }
 
 module Dump =
+
     let run (log : ILogger, verboseConsole, maybeSeq) (args : ParseResults) =
         let a = DumpInfo args
         let createStoreLog verboseStore = createStoreLog verboseStore verboseConsole maybeSeq
@@ -426,7 +429,7 @@ let main argv =
         let verbose = args.Contains Verbose
         use log = createDomainLog verbose verboseConsole maybeSeq
         try match args.GetSubCommand() with
-            | Init iargs -> CosmosInit.containerAndOrDb (log, verboseConsole, maybeSeq) iargs |> Async.RunSynchronously
+            | Init iargs -> CosmosInit.containerAndOrDb log iargs |> Async.RunSynchronously
             | Config cargs -> SqlInit.databaseOrSchema log cargs |> Async.RunSynchronously
             | Dump dargs -> Dump.run (log, verboseConsole, maybeSeq) dargs |> Async.RunSynchronously
             | Stats sargs -> CosmosStats.run (log, verboseConsole, maybeSeq) sargs |> Async.RunSynchronously