diff --git a/.github/workflows/dotnet-core.yml b/.github/workflows/dotnet-core.yml index cb090315..ff6e7237 100644 --- a/.github/workflows/dotnet-core.yml +++ b/.github/workflows/dotnet-core.yml @@ -42,7 +42,7 @@ jobs: uses: actions/setup-dotnet@v3 if: contains(env.DOTNET_VERSION_LIST, '6.0.') == 'false' || contains(env.DOTNET_VERSION_LIST, '7.0.') == 'false' || contains(env.DOTNET_VERSION_LIST, '8.0.') == 'false' with: - dotnet-version: '${{ runner.dotnet }}' + dotnet-version: "6\n7\n8\n" - name: Setup .NET 4.8.1 if on windows uses: actions/setup-dotnet@v3 @@ -129,7 +129,7 @@ jobs: - name: Upload artifacts uses: actions/upload-artifact@v4 id: nuget-artifact-upload-step - if: matrix.pack + if: npack with: name: NuGetPackages path: NuGetPackages/Release/*.nupkg diff --git a/Imageflow.Server.sln b/Imageflow.Server.sln index 67ecf087..5f95a957 100644 --- a/Imageflow.Server.sln +++ b/Imageflow.Server.sln @@ -1,4 +1,4 @@ - + Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 16 VisualStudioVersion = 16.0.29503.13 @@ -60,8 +60,6 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Imageflow.Server.Host", "sr EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Imageflow.Server.Configuration.Tests", "tests\Imageflow.Server.Configuration.Tests\Imageflow.Server.Configuration.Tests.csproj", "{200CD4DA-C49B-413B-8FCF-8DA73A2803E0}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Imazen.LegacyDotNetCompatTests", "tests\Imazen.LegacyDotNetCompatTests\Imazen.LegacyDotNetCompatTests.csproj", "{8EF6CC3F-027C-486C-B30B-09773E0C7966}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Imazen.Routing", "src\Imazen.Routing\Imazen.Routing.csproj", "{5EF0F9EA-D6DE-4E34-9141-4C44DA08A726}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Imazen.Abstractions", "src\Imazen.Abstractions\Imazen.Abstractions.csproj", "{A04B9BE0-4931-4305-B9AB-B79737130F20}" @@ -152,10 +150,6 @@ Global 
{200CD4DA-C49B-413B-8FCF-8DA73A2803E0}.Debug|Any CPU.Build.0 = Debug|Any CPU {200CD4DA-C49B-413B-8FCF-8DA73A2803E0}.Release|Any CPU.ActiveCfg = Release|Any CPU {200CD4DA-C49B-413B-8FCF-8DA73A2803E0}.Release|Any CPU.Build.0 = Release|Any CPU - {8EF6CC3F-027C-486C-B30B-09773E0C7966}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {8EF6CC3F-027C-486C-B30B-09773E0C7966}.Debug|Any CPU.Build.0 = Debug|Any CPU - {8EF6CC3F-027C-486C-B30B-09773E0C7966}.Release|Any CPU.ActiveCfg = Release|Any CPU - {8EF6CC3F-027C-486C-B30B-09773E0C7966}.Release|Any CPU.Build.0 = Release|Any CPU {5EF0F9EA-D6DE-4E34-9141-4C44DA08A726}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {5EF0F9EA-D6DE-4E34-9141-4C44DA08A726}.Debug|Any CPU.Build.0 = Debug|Any CPU {5EF0F9EA-D6DE-4E34-9141-4C44DA08A726}.Release|Any CPU.ActiveCfg = Release|Any CPU diff --git a/src/Imageflow.Server.Configuration/Imageflow.Server.Configuration.csproj b/src/Imageflow.Server.Configuration/Imageflow.Server.Configuration.csproj index 06da7f23..46ec5ca3 100644 --- a/src/Imageflow.Server.Configuration/Imageflow.Server.Configuration.csproj +++ b/src/Imageflow.Server.Configuration/Imageflow.Server.Configuration.csproj @@ -1,6 +1,6 @@ - + net8.0 diff --git a/src/Imageflow.Server.Storage.AzureBlob/AzureBlobService.cs b/src/Imageflow.Server.Storage.AzureBlob/AzureBlobService.cs index b5ca91af..24b6c1e2 100644 --- a/src/Imageflow.Server.Storage.AzureBlob/AzureBlobService.cs +++ b/src/Imageflow.Server.Storage.AzureBlob/AzureBlobService.cs @@ -101,8 +101,8 @@ public async Task> Fetch(string virtualPath) var blobClient = containerClient.GetBlobClient(key); var reference = new AzureBlobStorageReference(containerClient.Uri.AbsoluteUri, key); var s = await blobClient.DownloadStreamingAsync(); - - return CodeResult.Ok(new BlobWrapper(new AzureBlob(reference, s.Value))); + var latencyZone = new LatencyTrackingZone($"azure::blob/{mapping.Container}", 100); + return CodeResult.Ok(new BlobWrapper(latencyZone,new AzureBlob(reference, s.Value))); } catch 
(RequestFailedException e) diff --git a/src/Imageflow.Server.Storage.AzureBlob/Caching/AzureBlobCache.cs b/src/Imageflow.Server.Storage.AzureBlob/Caching/AzureBlobCache.cs index 909ad70f..e29c5b31 100644 --- a/src/Imageflow.Server.Storage.AzureBlob/Caching/AzureBlobCache.cs +++ b/src/Imageflow.Server.Storage.AzureBlob/Caching/AzureBlobCache.cs @@ -172,7 +172,7 @@ public async Task> CacheFetch(IBlo { var response = await blob.DownloadStreamingAsync(new BlobDownloadOptions(), cancellationToken); SetContainerExists(groupConfig.Location.ContainerName, true); - return BlobCacheFetchFailure.OkResult(new BlobWrapper(new AzureBlob(storage, response))); + return BlobCacheFetchFailure.OkResult(new BlobWrapper(null,new AzureBlob(storage, response))); } catch (Azure.RequestFailedException ex) diff --git a/src/Imageflow.Server.Storage.S3/Caching/S3BlobCache.cs b/src/Imageflow.Server.Storage.S3/Caching/S3BlobCache.cs index f1d42484..5769159d 100644 --- a/src/Imageflow.Server.Storage.S3/Caching/S3BlobCache.cs +++ b/src/Imageflow.Server.Storage.S3/Caching/S3BlobCache.cs @@ -177,8 +177,9 @@ public async Task CacheFetch(IBlobCacheRequest request, Cancell var result = await client.GetObjectAsync(req, cancellationToken); if (result.HttpStatusCode == HttpStatusCode.OK) { + var latencyZone = new LatencyTrackingZone($"s3::bucket/{bucket}", 100); return BlobCacheFetchFailure.OkResult( - new BlobWrapper(new S3Blob(result))); + new BlobWrapper(latencyZone,new S3Blob(result))); } // 404/403 are cache misses and return these diff --git a/src/Imageflow.Server.Storage.S3/S3Service.cs b/src/Imageflow.Server.Storage.S3/S3Service.cs index c8551deb..ec986f4a 100644 --- a/src/Imageflow.Server.Storage.S3/S3Service.cs +++ b/src/Imageflow.Server.Storage.S3/S3Service.cs @@ -78,8 +78,9 @@ public async Task> Fetch(string virtualPath) var client = mapping.S3Client ?? 
this.s3Client; var req = new Amazon.S3.Model.GetObjectRequest() { BucketName = mapping.Bucket, Key = key }; + var latencyZone = new LatencyTrackingZone($"s3::bucket/{mapping.Bucket}", 100); var s = await client.GetObjectAsync(req); - return new BlobWrapper(new S3Blob(s)); + return new BlobWrapper(latencyZone,new S3Blob(s)); } catch (AmazonS3Exception se) { if (se.StatusCode == System.Net.HttpStatusCode.NotFound || "NoSuchKey".Equals(se.ErrorCode, StringComparison.OrdinalIgnoreCase)) diff --git a/src/Imageflow.Server/Imageflow.Server.csproj b/src/Imageflow.Server/Imageflow.Server.csproj index 7e6e73bd..f7f3f018 100644 --- a/src/Imageflow.Server/Imageflow.Server.csproj +++ b/src/Imageflow.Server/Imageflow.Server.csproj @@ -1,5 +1,5 @@  - + net6.0;net8.0 diff --git a/src/Imazen.Abstractions/Blobs/BlobWrapper.cs b/src/Imazen.Abstractions/Blobs/BlobWrapper.cs index 60d0194c..db907346 100644 --- a/src/Imazen.Abstractions/Blobs/BlobWrapper.cs +++ b/src/Imazen.Abstractions/Blobs/BlobWrapper.cs @@ -35,18 +35,32 @@ public class BlobWrapper : IBlobWrapper { private IConsumableBlob? consumable; private IReusableBlob? reusable; + internal DateTime CreatedAtUtc { get; } + internal LatencyTrackingZone? LatencyZone { get; set; } - public BlobWrapper(IConsumableBlob consumable) + public BlobWrapper(LatencyTrackingZone? latencyZone, IConsumableBlob consumable) { this.consumable = consumable; this.Attributes = consumable.Attributes; + CreatedAtUtc = DateTime.UtcNow; + LatencyZone = latencyZone; } - - public BlobWrapper(IReusableBlob reusable) + public BlobWrapper(LatencyTrackingZone? 
latencyZone, IReusableBlob reusable) { this.reusable = reusable; this.Attributes = reusable.Attributes; + CreatedAtUtc = DateTime.UtcNow; + LatencyZone = latencyZone; } + [Obsolete("Use the constructor that takes a first parameter of LatencyTrackingZone")] + public BlobWrapper(IConsumableBlob consumable) + { + this.consumable = consumable; + this.Attributes = consumable.Attributes; + CreatedAtUtc = DateTime.UtcNow; + } + + public IBlobAttributes Attributes { get; } public bool IsNativelyReusable => reusable != null; diff --git a/src/Imazen.Abstractions/Blobs/LatencyTrackingZone.cs b/src/Imazen.Abstractions/Blobs/LatencyTrackingZone.cs index ca5052d5..e3f3b57e 100644 --- a/src/Imazen.Abstractions/Blobs/LatencyTrackingZone.cs +++ b/src/Imazen.Abstractions/Blobs/LatencyTrackingZone.cs @@ -10,4 +10,4 @@ namespace Imazen.Abstractions.Blobs; /// /// public record LatencyTrackingZone( - string TrackingZone, int DefaultMs); \ No newline at end of file + string TrackingZone, int DefaultMs, bool AlwaysShield = false); \ No newline at end of file diff --git a/src/Imazen.Abstractions/Blobs/ReusableArraySegmentBlob.cs b/src/Imazen.Abstractions/Blobs/ReusableArraySegmentBlob.cs index 69727c70..ac1011d9 100644 --- a/src/Imazen.Abstractions/Blobs/ReusableArraySegmentBlob.cs +++ b/src/Imazen.Abstractions/Blobs/ReusableArraySegmentBlob.cs @@ -19,9 +19,12 @@ public class ReusableArraySegmentBlob : IReusableBlob public IBlobAttributes Attributes { get; } private readonly ArraySegment data; private bool disposed = false; + public TimeSpan CreationDuration { get; init; } + public DateTime CreationCompletionUtc { get; init; } = DateTime.UtcNow; - public ReusableArraySegmentBlob(ArraySegment data, IBlobAttributes metadata) + public ReusableArraySegmentBlob(ArraySegment data, IBlobAttributes metadata, TimeSpan creationDuration) { + CreationDuration = creationDuration; this.data = data; this.Attributes = metadata; // Precalculate since it will be called often diff --git 
a/src/Imazen.Abstractions/Blobs/SimpleReusableBlobFactory.cs b/src/Imazen.Abstractions/Blobs/SimpleReusableBlobFactory.cs index 81e336f7..4cefe597 100644 --- a/src/Imazen.Abstractions/Blobs/SimpleReusableBlobFactory.cs +++ b/src/Imazen.Abstractions/Blobs/SimpleReusableBlobFactory.cs @@ -1,3 +1,6 @@ +using System.Diagnostics; +using System.Net.Http.Headers; + namespace Imazen.Abstractions.Blobs; /// @@ -22,6 +25,7 @@ public async ValueTask ConsumeAndCreateReusableCopy(IConsumableBl { using (consumableBlob) { + var sw = Stopwatch.StartNew(); #if NETSTANDARD2_1_OR_GREATER await using var stream = consumableBlob.TakeStream(); #else @@ -32,7 +36,8 @@ public async ValueTask ConsumeAndCreateReusableCopy(IConsumableBl ms.Position = 0; var byteArray = ms.ToArray(); var arraySegment = new ArraySegment(byteArray); - var reusable = new ReusableArraySegmentBlob(arraySegment, consumableBlob.Attributes); + sw.Stop(); + var reusable = new ReusableArraySegmentBlob(arraySegment, consumableBlob.Attributes, sw.Elapsed); return reusable; } } diff --git a/src/Imazen.HybridCache/AsyncCache.cs b/src/Imazen.HybridCache/AsyncCache.cs index a67ab16f..49361d68 100644 --- a/src/Imazen.HybridCache/AsyncCache.cs +++ b/src/Imazen.HybridCache/AsyncCache.cs @@ -63,11 +63,13 @@ public AsyncCache(AsyncCacheOptions options, ICacheCleanupManager cleanupManager SubscribesToFreshResults = true, RequiresInlineExecution = false }; + LatencyZone = new LatencyTrackingZone($"Hybrid Disk Cache ('{UniqueName}')", 30); } ICacheDatabase Database { get; } - + private LatencyTrackingZone LatencyZone { get; set; } + private AsyncCacheOptions Options { get; } private HashBasedPathBuilder PathBuilder { get; } private ILogger Logger { get; } @@ -178,7 +180,7 @@ private static bool IsFileLocked(IOException exception) if (openedStream != null) { //TODO: add contended hit detail - return AsyncCacheResult.FromHit(record, entry.RelativePath, PathBuilder, openedStream, this, this); + return AsyncCacheResult.FromHit(record, 
entry.RelativePath, PathBuilder, openedStream, LatencyZone, this, this); } return null; @@ -203,7 +205,7 @@ private static bool IsFileLocked(IOException exception) { return AsyncCacheResult.FromHit(record, entry.RelativePath, PathBuilder, new FileStream( entry.PhysicalPath, FileMode.Open, FileAccess.Read, FileShare.Read, 4096, - FileOptions.Asynchronous | FileOptions.SequentialScan), this, this); + FileOptions.Asynchronous | FileOptions.SequentialScan), LatencyZone, this, this); } catch (FileNotFoundException) @@ -747,7 +749,7 @@ private static class AsyncCacheResult internal static IResult FromHit(ICacheDatabaseRecord? record, string entryRelativePath, HashBasedPathBuilder interpreter, - FileStream stream, IBlobCache notifyOfResult, IBlobCache notifyOfExternalHit) + FileStream stream, LatencyTrackingZone latencyZone, IBlobCache notifyOfResult, IBlobCache notifyOfExternalHit) { var blob = new ConsumableBlob(new BlobAttributes() { @@ -759,7 +761,7 @@ internal static IResult FromHit(ICacheData BlobStorageReference = new FileBlobStorageReference(entryRelativePath, interpreter, record) }, stream); - return Result.Ok(new BlobWrapper(blob)); + return Result.Ok( new BlobWrapper(latencyZone, blob)); } diff --git a/src/Imazen.Routing/Caching/MemoryCache.cs b/src/Imazen.Routing/Caching/MemoryCache.cs index 7dcd2ba6..bac78404 100644 --- a/src/Imazen.Routing/Caching/MemoryCache.cs +++ b/src/Imazen.Routing/Caching/MemoryCache.cs @@ -55,7 +55,7 @@ public static async IAsyncEnumerable AsAsyncEnumerable(this IEnumerable /// Items that are under the MinKeepNewItemsFor grace period should be kept in a separate list. 
/// They can graduate to the main list if they are accessed more than x times /// -public class MemoryCache(MemoryCacheOptions Options) : IBlobCache +public class MemoryCache(MemoryCacheOptions options) : IBlobCache { private record CacheEntry(string CacheKey, IBlobWrapper BlobWrapper, UsageTracker UsageTracker) { @@ -76,7 +76,7 @@ public string GetFullyQualifiedRepresentation() } ConcurrentDictionary __cache = new ConcurrentDictionary(); - public string UniqueName => Options.UniqueName; + public string UniqueName => options.UniqueName; public BlobCacheCapabilities InitialCacheCapabilities { get; } = new BlobCacheCapabilities @@ -110,13 +110,13 @@ public Task CacheFetch(IBlobCacheRequest request, Cancellation return Task.FromResult(BlobCacheFetchFailure.MissResult(this, this)); } - private long MemoryUsedSync; - private long ItemCountSync; + private long memoryUsedSync; + private long itemCountSync; private bool TryRemove(string cacheKey, [MaybeNullWhen(false)] out CacheEntry removed) { if (!__cache.TryRemove(cacheKey, out removed)) return false; - Interlocked.Add(ref MemoryUsedSync, -removed.BlobWrapper.EstimateAllocatedBytes ?? 0); - Interlocked.Decrement(ref ItemCountSync); + Interlocked.Add(ref memoryUsedSync, -removed.BlobWrapper.EstimateAllocatedBytes ?? 0); + Interlocked.Decrement(ref itemCountSync); return true; } @@ -124,7 +124,7 @@ private bool TryEnsureCapacity(long size) { List? snapshotOfEntries = null; int nextCandidateIndex = 0; - while (ItemCountSync > Options.MaxItems || MemoryUsedSync > Options.MaxMemoryUtilizationMb * 1024 * 1024) + while (itemCountSync > options.MaxItems || memoryUsedSync + size > options.MaxMemoryUtilizationMb * 1024 * 1024) { if (snapshotOfEntries == null) { @@ -138,7 +138,7 @@ private bool TryEnsureCapacity(long size) return false; // We've run out of candidates. We can't make space. 
} var candidate = snapshotOfEntries[nextCandidateIndex++]; - if (candidate.UsageTracker.LastAccessedUtc > DateTimeOffset.UtcNow - Options.MinKeepNewItemsFor) + if (candidate.UsageTracker.LastAccessedUtc > DateTimeOffset.UtcNow - options.MinKeepNewItemsFor) { nextCandidateIndex++; // Skip this item continue; // This item is too new to evict. @@ -169,7 +169,7 @@ private bool TryAdd(string cacheKey, IBlobWrapper blob) } } var replacementSizeDifference = (long)blob.EstimateAllocatedBytes! - (existingBlob?.EstimateAllocatedBytes ?? 0); - if (blob.EstimateAllocatedBytes > Options.MaxItemSizeKb * 1024) + if (blob.EstimateAllocatedBytes > options.MaxItemSizeKb * 1024) { return false; } @@ -179,14 +179,14 @@ private bool TryAdd(string cacheKey, IBlobWrapper blob) } var entry = new CacheEntry(cacheKey, blob, UsageTracker.Create()); - if (entry == __cache.AddOrUpdate(cacheKey, entry, (key, existing) => existing with { BlobWrapper = blob })) + if (entry == __cache.AddOrUpdate(cacheKey, entry, (_, existing) => existing with { BlobWrapper = blob })) { - Interlocked.Increment(ref ItemCountSync); - Interlocked.Add(ref MemoryUsedSync, blob.EstimateAllocatedBytes ?? 0); - ItemCountSync++; + Interlocked.Increment(ref itemCountSync); + Interlocked.Add(ref memoryUsedSync, blob.EstimateAllocatedBytes ?? 
0); + itemCountSync++; return true; } - Interlocked.Add(ref MemoryUsedSync, replacementSizeDifference); + Interlocked.Add(ref memoryUsedSync, replacementSizeDifference); return false; } @@ -237,7 +237,7 @@ public Task CacheDelete(IBlobStorageReference reference, Cancellatio { if (reference is MemoryCacheStorageReference memoryCacheStorageReference) { - if (TryRemove(memoryCacheStorageReference.CacheKey, out var removed)) + if (TryRemove(memoryCacheStorageReference.CacheKey, out _)) { return Task.FromResult(CodeResult.Ok()); } diff --git a/src/Imazen.Routing/Caching/cache_design.md b/src/Imazen.Routing/Caching/cache_design.md deleted file mode 100644 index 4548467f..00000000 --- a/src/Imazen.Routing/Caching/cache_design.md +++ /dev/null @@ -1,79 +0,0 @@ - -# BlobCache Design - -We have multiple cache engine implementations - one for long-running, one for serverless/fast actions. - -Ideally, we would want both routing and serverless cache logic to be exportable to Javascript for running on Lambda@Edge, Vercel, Netlify, Cloudflare Edge, etc. - -TrustedKeyGroups can allow public/private signing so that the public keys can be deployed to the edge, and the private keys can be used to sign requests. - - -### Put hard logic in BlobCacheExecutor - -* AsyncWriteQueue -* MemoryCache -* RecentlyCachedHashes (tracking where we've recently cached stuff) -* ExistenceProbability (tracking how likely a cache is to have a file) -* CircuitBreaker (tracking if a cache is failing to read or write) -* Parallel cache requests. -* CircuitBreaker failures may mean initializing a new cache (such as a hybrid cache to a backup location) and adding it to the chain. -* - -We are abandoning the callback system, because it limits us and makes caches complex. -We are obsoleting StreamCache. - -## Some paths - -* A cache is hit, but a later cache doesn't have a chance at the request. It needs an ExternalHit notificaiton -* A cache experiences a miss, but another cache has a hit. 
ExternalHitYouMissed -* A cache experiences a miss, and no other cache has a hit. ExternalMiss -* ExternalMiss and successful generation of the image. ResultGenerated -* ExternalMiss and failure to generate the image. ResultFailed -* Proxied request. ResultProxiedSuccessfully - - ------ - -What we need to do is switch away from the callback system. While chaining is an option, what's really happening with callbacks is enabling locking for work deduplication. - -When a cache has a miss, it also returns a request to be notified of when data is available for that request. Caches get notifications when data is generated (or located in another cache) for something they missed - but also for when a request is hit via another cache earlier in the chain. - -If all caches miss, the engine locks on the image generation process. -When we succeed through the lock, we check the async write queue (which will now be shared among all caches). If it's a hit there, then we can respond using that reusable stream to serve the data. - -If it's not in the async write queue, we could either (a) current behavior for hybridcache - check the cache storage again) or (b) check a 'recently cached hashes log which is reset every few minutes to determine if we should re-run the cache fetch, or (c) not check caches, just generate the image. (c) assumes that writes will take enough time to complete that the images will always stay in the queue a little longer (and we could sleep 50ms or something to help that), or (d) check the write queue, then check the recent puts log, if present, then fetch from cache. - - -I think this approach, a central write queue and external cache coordination, offers the most flexibility and is the way to go, even though it means breaking the streamcache API. - -Now, for requests that do not involve any file processing - proxy requests. For these, creating a reusable stream involves extra costs and we want to avoid it. 
To reduce proxy latency, we should copy to the output stream simultaneously with a memory stream (maybe?) and then enqueue that write as normal. - -We also want to support a no-cache scenario efficiently - why s3 cache s3 files, right? But even then, some buffering is probably good - -We probably want a generalized memory cache, bounded, possibly not eagerly evicted, and one that can clear entries that failed their tasks or timed out. We want some insight into it as well, such as turnover and cache hit rate. - -To reduce fragmentation, we probably want a memory chunk provider that can reuse blocks. We would have to refcount reader streams before releasing, however. - -Dual framework imazen.common perhaps. - -We should probably utilize circuit breaker on cache reads and writes. And take latency stats for fetches and writes, perhaps to determine cache order? - -For hybridcache, we can add a look-in-alt-cache directories setting, or just circuit breaker cache writes independently from cache reads, and just create (n) caches. We could lockfile the whole write side of the cache too, and leave that memory index offline for it. - - -Fetches, writes, and tag searches should all return location references for audit/logging - -And for cache fetches, we probably want *some* caches to fetch in parallel and take whomever comes back first. - -So instead of a chain, we want a list of groups? - - - - - - - - - - - diff --git a/src/Imazen.Routing/Caching/design.md b/src/Imazen.Routing/Caching/design.md index 59eba66d..6a1a349d 100644 --- a/src/Imazen.Routing/Caching/design.md +++ b/src/Imazen.Routing/Caching/design.md @@ -1,13 +1,7 @@ -# design notes +# design notes (all of this is outdated and should be ignored) We want to enable chaining, where one request could be based on another. 
- - -# IExecutionPlan -promise + debug info + source file info - - # Execution plan A simple plan might just include the input image locations and diff --git a/src/Imazen.Routing/Health/BehaviorMetrics.cs b/src/Imazen.Routing/Health/BehaviorMetrics.cs index ac56a6f8..7c4bac0b 100644 --- a/src/Imazen.Routing/Health/BehaviorMetrics.cs +++ b/src/Imazen.Routing/Health/BehaviorMetrics.cs @@ -24,6 +24,11 @@ internal struct BehaviorMetrics(MetricBasis basis, BehaviorTask task) public int TotalFailureReports { get; private set; } = 0; public int ConsecutiveSuccessReports { get; private set; } = 0; public int ConsecutiveFailureReports { get; private set; } = 0; + + // track TTFB, TTLB, and byte count? + // If the ArraySegmentBlobWrapper duration is > 0 < 5ms, it was probably + // buffered prior, and we will assume that only TTLB is relevant. + public void ReportBehavior(bool ok, BehaviorTask task, TimeSpan taskDuration) { diff --git a/src/Imazen.Routing/Layers/BlobProvidersLayer.cs b/src/Imazen.Routing/Layers/BlobProvidersLayer.cs index 3addf866..96e3d195 100644 --- a/src/Imazen.Routing/Layers/BlobProvidersLayer.cs +++ b/src/Imazen.Routing/Layers/BlobProvidersLayer.cs @@ -176,7 +176,7 @@ public override async ValueTask> TryGetBlobAsync(IReque // IBlobData is IDispose, but if someone calls TakeStream on the consumable blob and disposes the consumable blob, the stream will probably be // disposed if IBlobData calls dispose on the response object and that also disposes the stream. 
- return CodeResult.Ok(new BlobWrapper(new ConsumableBlob(attrs, stream, false, blobData, true))); + return CodeResult.Ok(new BlobWrapper(LatencyZone, new ConsumableBlob(attrs, stream, false, blobData, true))); } } diff --git a/src/Imazen.Routing/Layers/LocalFilesLayer.cs b/src/Imazen.Routing/Layers/LocalFilesLayer.cs index baf6b0d5..9ab5c877 100644 --- a/src/Imazen.Routing/Layers/LocalFilesLayer.cs +++ b/src/Imazen.Routing/Layers/LocalFilesLayer.cs @@ -113,7 +113,7 @@ public override void WriteCacheKeyBasisPairsToRecursive(IBufferWriter writ public override ValueTask> TryGetBlobAsync(IRequestSnapshot request, IBlobRequestRouter router, IBlobPromisePipeline pipeline, CancellationToken cancellationToken = default) { - return Tasks.ValueResult(CodeResult.Ok(new BlobWrapper(new PhysicalFileBlob(PhysicalPath, LastWriteTimeUtc)))); + return Tasks.ValueResult(CodeResult.Ok(new BlobWrapper(this.LatencyZone, new PhysicalFileBlob(PhysicalPath, LastWriteTimeUtc)))); } } diff --git a/src/Imazen.Routing/Layers/routing_design.md b/src/Imazen.Routing/Layers/routing_design.md index 95719f98..c241aa22 100644 --- a/src/Imazen.Routing/Layers/routing_design.md +++ b/src/Imazen.Routing/Layers/routing_design.md @@ -49,7 +49,7 @@ A priority is fast startup (for action use), but also fast evaluation. MessagePa # To consider How to support remote reader signing, full url signing, and multi-tenanting support with different key sets. Add -support for seeds? +support for seeds? And pub/private key support would allow untrusted edge functions to verify signatures. Minimizing utf32 overhead, perhaps keep utf8 version of string types? Or make the data type framework-conditional, with overloads so strings can always be specified when using the C# interface? 
diff --git a/src/Imazen.Routing/Promises/Pipelines/ServerlessCacheEngine.cs b/src/Imazen.Routing/Promises/Pipelines/CacheEngine.cs similarity index 91% rename from src/Imazen.Routing/Promises/Pipelines/ServerlessCacheEngine.cs rename to src/Imazen.Routing/Promises/Pipelines/CacheEngine.cs index 9dc07a5b..5a22ac92 100644 --- a/src/Imazen.Routing/Promises/Pipelines/ServerlessCacheEngine.cs +++ b/src/Imazen.Routing/Promises/Pipelines/CacheEngine.cs @@ -15,51 +15,13 @@ namespace Imazen.Routing.Promises.Pipelines; - -public record ServerlessCacheEngineOptions -{ - public ServerlessCacheEngineOptions(){} - public ServerlessCacheEngineOptions(List simultaneousFetchAndPut, BoundedTaskCollection? taskPool, IReLogger logger) - { - SeriesOfCacheGroups = new List>(){simultaneousFetchAndPut}; - SaveToCaches = simultaneousFetchAndPut; - UploadQueue = taskPool; - Logger = logger; - - } - // Each cache group is a list of caches that can be queried in parallel - public required List> SeriesOfCacheGroups { get; init; } - - public required List SaveToCaches { get; init; } - - [Obsolete("Use the parameterized one local to the request")] - public IBlobRequestRouter? RequestRouter { get; init; } - - public required IReusableBlobFactory BlobFactory { get; init; } - - public required BoundedTaskCollection? UploadQueue { get; init; } - - public required IReLogger Logger { get; init; } - - /// - /// If true, provides the opportunity for an IBlobCache to eliminate duplicate requests and prevent thundering herd. - /// - public bool LockByUniqueRequest { get; init; } - - /// - /// How long to wait for fetching and generation of the same request by another thread. - /// - public int LockTimeoutMs { get; init; } = 2000; - - -} /// /// Optimized for quick startup, such as for serverless scenarios. 
No optimizations for multiple requests /// -public class ServerlessCacheEngine: IBlobPromisePipeline +public class CacheEngine: IBlobPromisePipeline { - public ServerlessCacheEngine(IBlobPromisePipeline? next, ServerlessCacheEngineOptions options) + public CacheEngine(IBlobPromisePipeline? next, CacheEngineOptions options) { Options = options; Next = next; @@ -93,7 +55,7 @@ public async ValueTask> GetFinalPromiseAsync(I protected int AllCachesCount; - public ServerlessCacheEngineOptions Options { get; } + public CacheEngineOptions Options { get; } public static IBlobCacheRequest For(ICacheableBlobPromise promise, BlobGroup blobGroup) { @@ -133,7 +95,9 @@ public async ValueTask> Fetch(ICacheableBlobPromise pro return await FetchInner(cacheRequest, promise, router, cancellationToken); } } + + public async ValueTask> FetchInner(IBlobCacheRequest cacheRequest, ICacheableBlobPromise promise, IBlobRequestRouter router, CancellationToken cancellationToken = default) { // First check the upload queue. 
@@ -408,7 +372,7 @@ private void HandleUploadAnswers(PutResult[] results) } -internal record ServerlessCachePromise(IRequestSnapshot FinalRequest, ICacheableBlobPromise FreshPromise, ServerlessCacheEngine CacheEngine): ICacheableBlobPromise +internal record ServerlessCachePromise(IRequestSnapshot FinalRequest, ICacheableBlobPromise FreshPromise, CacheEngine CacheEngine): ICacheableBlobPromise { public bool IsCacheSupporting => true; public bool HasDependencies => FreshPromise.HasDependencies; diff --git a/src/Imazen.Routing/Promises/Pipelines/CacheEngineOptions.cs b/src/Imazen.Routing/Promises/Pipelines/CacheEngineOptions.cs new file mode 100644 index 00000000..772253f5 --- /dev/null +++ b/src/Imazen.Routing/Promises/Pipelines/CacheEngineOptions.cs @@ -0,0 +1,45 @@ +using Imazen.Abstractions.BlobCache; +using Imazen.Abstractions.Blobs; +using Imazen.Abstractions.Logging; +using Imazen.Common.Concurrency.BoundedTaskCollection; +using Imazen.Routing.Requests; + +namespace Imazen.Routing.Promises.Pipelines; + +public record CacheEngineOptions +{ + public CacheEngineOptions(){} + public CacheEngineOptions(List simultaneousFetchAndPut, BoundedTaskCollection? taskPool, IReLogger logger) + { + SeriesOfCacheGroups = new List>(){simultaneousFetchAndPut}; + SaveToCaches = simultaneousFetchAndPut; + UploadQueue = taskPool; + Logger = logger; + + } + // Each cache group is a list of caches that can be queried in parallel + public required List> SeriesOfCacheGroups { get; init; } + + public required List SaveToCaches { get; init; } + + [Obsolete("Use the parameterized one local to the request")] + public IBlobRequestRouter? RequestRouter { get; init; } + + public required IReusableBlobFactory BlobFactory { get; init; } + + public required BoundedTaskCollection? UploadQueue { get; init; } + + public required IReLogger Logger { get; init; } + + /// + /// If true, provides the opportunity for an IBlobCache to eliminate duplicate requests and prevent thundering herd. 
+ /// + public bool LockByUniqueRequest { get; init; } + + /// + /// How long to wait for fetching and generation of the same request by another thread. + /// + public int LockTimeoutMs { get; init; } = 2000; + + +} \ No newline at end of file diff --git a/src/Imazen.Routing/Promises/Pipelines/ImagingMiddleware.cs b/src/Imazen.Routing/Promises/Pipelines/ImagingMiddleware.cs index 0c6fa8f5..2f1c1a5f 100644 --- a/src/Imazen.Routing/Promises/Pipelines/ImagingMiddleware.cs +++ b/src/Imazen.Routing/Promises/Pipelines/ImagingMiddleware.cs @@ -1,4 +1,6 @@ using System.Buffers; +using System.Diagnostics; +using System.Text; using Imageflow.Fluent; using Imazen.Abstractions.Blobs; using Imazen.Abstractions.HttpStrings; @@ -91,6 +93,7 @@ public async ValueTask> GetFinalPromiseAsync(I if (commandDict == null && appliedWatermarks == null) { + // No image processing to do... return CodeResult.Ok(wrappedPromise); } var finalCommandString = commandDict == null ? "" : QueryHelpers.AddQueryString("", commandDict); @@ -114,8 +117,7 @@ public ImagingPromise(ImagingMiddlewareOptions options, Options = options; AppliedWatermarks = appliedWatermarks; FinalCommandString = finalCommandString.TrimStart('?'); - LatencyZone = new LatencyTrackingZone("imaging", 1000); - + // determine if we have extra dependencies if (AppliedWatermarks != null) @@ -134,13 +136,38 @@ public ImagingPromise(ImagingMiddlewareOptions options, { Dependencies = new List(){Input0}; } + + } private byte[]? cacheKey32Bytes = null; public byte[] GetCacheKey32Bytes() { return cacheKey32Bytes ??= this.GetCacheKey32BytesUncached(); } - public LatencyTrackingZone? LatencyZone { get; init; } + + private LatencyTrackingZone? latencyZone = null; + /// + /// Must route dependencies first! + /// + public LatencyTrackingZone? 
LatencyZone { + get + { + if (!ReadyToWriteCacheKeyBasisData) throw new InvalidOperationException("Dependencies must be routed first"); + // produce a latency zone based on all dependency strings, joined, plus the sum of their latency defaults + if (latencyZone != null) return latencyZone; + var latency = 0; + var sb = new StringBuilder(); + sb.Append("imageJob("); + foreach (var dependency in Dependencies!) + { + latency += dependency.LatencyZone?.DefaultMs ?? 0; + sb.Append(dependency.LatencyZone?.TrackingZone ?? "(unknown)"); + } + sb.Append(")"); + latencyZone = new LatencyTrackingZone(sb.ToString(), latency, true); + return latencyZone; + } + } private string FinalCommandString { get; init; } @@ -149,6 +176,7 @@ public async ValueTask> TryGetBlobAsync(IRequestSnapsho IBlobRequestRouter router, IBlobPromisePipeline pipeline, CancellationToken cancellationToken = default) { + var sw = Stopwatch.StartNew(); if (Dependencies == null) throw new InvalidOperationException("Dependencies must be routed first"); // fetch all dependencies in parallel, but avoid allocating if there's only one. 
List> dependencyResults = new List>(Dependencies.Count); @@ -236,8 +264,9 @@ public async ValueTask> TryGetBlobAsync(IRequestSnapsho ContentType = jobResult.First.PreferredMimeType, BlobByteCount = resultBytes.Value.Count }; - var reusable = new ReusableArraySegmentBlob(resultBytes.Value, attrs); - return CodeResult.Ok(new BlobWrapper(reusable)); + sw.Stop(); + var reusable = new ReusableArraySegmentBlob(resultBytes.Value, attrs, sw.Elapsed); + return CodeResult.Ok(new BlobWrapper(LatencyZone, reusable)); } finally { diff --git a/src/Imazen.Routing/Promises/Pipelines/cache_design.md b/src/Imazen.Routing/Promises/Pipelines/cache_design.md new file mode 100644 index 00000000..93be229f --- /dev/null +++ b/src/Imazen.Routing/Promises/Pipelines/cache_design.md @@ -0,0 +1,148 @@ +# Other issues +ReLogStore diagnostic section report +Multi-tenant support across shared IBlobCache - blob prefixes. +Ensure clarity in whether routes will only handle image extension requests, +extensionless, or +arbitrary or +certain. CORS and mime type definitions are needed. +Diagnostics: check nuget for latest version of imageflow* +Skip signing requirement for dependencies, like watermarks? +Create IExternalService interface for Lambda/Functions/Replicate/other Imageflow JSON job builder +Establish /build.job json endpoint + +Establish IBlobStore interface, or convert IBlobCache interface into IBlobStore. It should support 4 use cases at minimum +1. Get/put blobs at a given path for user use, and list blobs within a given path (s3 directory vs bucket may add considerations) +2. Support the functions needed for IBlobCache +3. Get/put blobs for usage as a job status tracker (such as if the request has been queued with a third party, what the status is, and where the results are at) +4. Pre-signed URLs for upload/download are essential +5. 
Manage underlying path length and character limitations with encoding and/or hashing as needed; IBlobStore defines the limits and our system works around them. +Establish disk, s3, and azure implementations of IBlobStore. +All disk code should be aware that it could be on-network and be high-latency friendly. + + +# ImagingMiddleware design + +## TODO: Intermediate variant chaining +where we have a dependency on an intermediate variant. We could resolve the promise, determine its existence probability, +and then decide if we want to use it as input. If we do, we can use that promise as input instead. If not, we can use the original input. +We need to spawn a job if it is missing, however, so future calls can use it. + +## TODO: header->query rewriting: Accept-Type: image/webp;q=0.9,image/jpeg;q=0.8,image/png;q=0.7,image/*;q=0.6,*/*;q=0.5 + +# CacheEngine design + + +## TODO: add ability to mark some requests as top-priority for in-memory (such as watermarks) +## TODO: establish both serverless-optimized and long-running optimized CacheEngineOptions variants + +## TODO: Permacaching mode, where nothing gets invalidated? + + +## Test cache searching, purging, and deletion, and auditing everywhere something is stored + +## ExistenceProbability could be reported instantly with the promise... + +## Allow phasing in global invalidations / config changes + + +## Optimizing the fetch/put sets (other than just caching everything with AlwaysShield=true) + // TODO: track and report fetch times for each cache + // Do a bit of randomization within capabilities so we don't avoid collecting data. For these randoms, let all fetch results complete. + // We also let the first (n) cache fetches complete when starting up, so we have data.
+ // We always ignore the first result from any Zone + + // We can make decisions based on the fresh promise zone VS the cache zone + // BlobWrapper returns the cache zone, the created date, and (when made reusable) the duration of the buffering process. + // But we need to externally track which IBlobCache instances return which BlobWrappers to properly gather intel + + // All middleware consumes BlobWrappers, so every middleware will need to instrument it.. but since all IBlobCache + // interaction is here, only we need to handle IBlobCache mapping... + // Theoretically multiple caches could use the same zone/bucket. So a single zone could have multiple IBlobCaches + // which is fine since we just want to know the zone perf for each IBlobCache + + + // We need to gather monitored TTFB/TTLB/byte data for each cache. + // When we have a fresh result, if it's from a blob provider, we can monitor that data too + // If it is from 'imaging', that middleware needs to monitor the blob results... + // Choose a cache strategy based on promise latency vs cache latency + + // And of course, if a promise has AlwaysShield=true, we cache it regardless. + // TODO: see what we can steal from instrumentation in licensing for sampling & stats + + +CacheEngine can be configured for serverless or long-running use. +In serverless mode, we rely more heavily on defaults and less on monitoring-based tuning. + + + +============ Old stuff + + +TrustedKeyGroups can allow public/private signing so that the public keys can be deployed to the edge, and the private keys can be used to sign requests. + + +### Put hard logic in BlobCacheExecutor + +* AsyncWriteQueue +* MemoryCache +* RecentlyCachedHashes (tracking where we've recently cached stuff) +* ExistenceProbability (tracking how likely a cache is to have a file) +* CircuitBreaker (tracking if a cache is failing to read or write) +* Parallel cache requests. 
+CircuitBreaker failures may mean initializing a new cache (such as a hybrid cache to a backup location) and adding it to the chain. +* + +We are abandoning the callback system, because it limits us and makes caches complex. +We are obsoleting StreamCache. + +## Some paths + +* A cache is hit, but a later cache doesn't have a chance at the request. It needs an ExternalHit notification +* A cache experiences a miss, but another cache has a hit. ExternalHitYouMissed +* A cache experiences a miss, and no other cache has a hit. ExternalMiss +* ExternalMiss and successful generation of the image. ResultGenerated +* ExternalMiss and failure to generate the image. ResultFailed +* Proxied request. ResultProxiedSuccessfully + + +----- +What we need to do is switch away from the callback system. While chaining is an option, what's really happening with callbacks is enabling locking for work deduplication. + +When a cache has a miss, it also returns a request to be notified of when data is available for that request. Caches get notifications when data is generated (or located in another cache) for something they missed - but also for when a request is hit via another cache earlier in the chain. + +If all caches miss, the engine locks on the image generation process. +When we succeed through the lock, we check the async write queue (which will now be shared among all caches). If it's a hit there, then we can respond using that reusable stream to serve the data. + +If it's not in the async write queue, we could either (a) current behavior for hybridcache - check the cache storage again) or (b) check a 'recently cached hashes' log which is reset every few minutes to determine if we should re-run the cache fetch, or (c) not check caches, just generate the image.
(c) assumes that writes will take enough time to complete that the images will always stay in the queue a little longer (and we could sleep 50ms or something to help that), or (d) check the write queue, then check the recent puts log, if present, then fetch from cache. + + +I think this approach, a central write queue and external cache coordination, offers the most flexibility and is the way to go, even though it means breaking the streamcache API. + +Now, for requests that do not involve any file processing - proxy requests. For these, creating a reusable stream involves extra costs and we want to avoid it. To reduce proxy latency, we should copy to the output stream simultaneously with a memory stream (maybe?) and then enqueue that write as normal. + +We also want to support a no-cache scenario efficiently - why s3 cache s3 files, right? But even then, some buffering is probably good + +We probably want a generalized memory cache, bounded, possibly not eagerly evicted, and one that can clear entries that failed their tasks or timed out. We want some insight into it as well, such as turnover and cache hit rate. + +To reduce fragmentation, we probably want a memory chunk provider that can reuse blocks. We would have to refcount reader streams before releasing, however. + +Dual framework imazen.common perhaps. + +We should probably utilize circuit breaker on cache reads and writes. And take latency stats for fetches and writes, perhaps to determine cache order? + +For hybridcache, we can add a look-in-alt-cache directories setting, or just circuit breaker cache writes independently from cache reads, and just create (n) caches. We could lockfile the whole write side of the cache too, and leave that memory index offline for it. + + +Fetches, writes, and tag searches should all return location references for audit/logging + +And for cache fetches, we probably want *some* caches to fetch in parallel and take whomever comes back first. 
+ +So instead of a chain, we want a list of groups? + + + + + + + + + + + diff --git a/src/Imazen.Routing/Promises/hash_design.md b/src/Imazen.Routing/Promises/hash_design.md index 953966f3..e69de29b 100644 --- a/src/Imazen.Routing/Promises/hash_design.md +++ b/src/Imazen.Routing/Promises/hash_design.md @@ -1,26 +0,0 @@ -A StringBuilder has simplicity benefits. But for future optimization, we probably want -to use Pipe or -Microsoft.Toolkit.HighPerformance.ArrayPoolBufferWriter, and add a wrapper that formats data to it. Pipe can adapt to string for backcompat, but we can first try to read from the pipe directly. We call TryRead, if it returns IsCompleted, we can directly -send the data to the hasher via ReadOnlySequence (.net 5+) using a fixed stack buffer for the 32 bytes hash, and then encode a string as lowercase-hexadecimal. If not, we advance 0 and call AsStream, with allocating the bytes etc. - -We can add extension methods to IBufferWriter to write wtf16 strings and other values - - - - - -And in .NET Core, it offers enumerating chunks of ReadOnlyMemory. - -And these bytes are fine to hash directly using MemoryMarshal. - -Chunked hashing is possible with TransformBlock and TransformFinalBlock, but only if the chunks are multiples of the block size. And it's likely the slow path in .net 8. - -The .NET 8 fast path for streams uses a 4096 byte cryptopool buffer. A stream over the chunkenumerator of stringbuilder is definitely possible. 
- -public static byte[] GetBytesWithSpan(ReadOnlySpan charSpan) -{ -ReadOnlySpan byteSpan = MemoryMarshal.AsBytes(charSpan); -var bytes = new byte[byteSpan.Length]; -byteSpan.CopyTo(bytes); -return bytes; -} \ No newline at end of file diff --git a/src/Imazen.Routing/Serving/ImageServer.cs b/src/Imazen.Routing/Serving/ImageServer.cs index d9cf3df5..64e2d122 100644 --- a/src/Imazen.Routing/Serving/ImageServer.cs +++ b/src/Imazen.Routing/Serving/ImageServer.cs @@ -70,7 +70,7 @@ public ImageServer(IImageServerContainer container, var watermarkingLogic = container.GetService() ?? new WatermarkingLogicOptions(null, null); - var sourceCacheOptions = new ServerlessCacheEngineOptions + var sourceCacheOptions = new CacheEngineOptions { SeriesOfCacheGroups = [ @@ -88,9 +88,9 @@ public ImageServer(IImageServerContainer container, WatermarkingLogic = watermarkingLogic }; - pipeline = new ServerlessCacheEngine(null, sourceCacheOptions); + pipeline = new CacheEngine(null, sourceCacheOptions); pipeline = new ImagingMiddleware(null, imagingOptions); - pipeline = new ServerlessCacheEngine(pipeline, sourceCacheOptions); + pipeline = new CacheEngine(pipeline, sourceCacheOptions); } public string GetDiagnosticsPageSection(DiagnosticsPageArea area) diff --git a/src/Imazen.Routing/Caching/ExistenceProbableMap.cs b/src/Imazen.Routing/Unused/ExistenceProbableMap.cs similarity index 97% rename from src/Imazen.Routing/Caching/ExistenceProbableMap.cs rename to src/Imazen.Routing/Unused/ExistenceProbableMap.cs index 4ed74fff..c66cb14a 100644 --- a/src/Imazen.Routing/Caching/ExistenceProbableMap.cs +++ b/src/Imazen.Routing/Unused/ExistenceProbableMap.cs @@ -1,4 +1,5 @@ using System.Buffers; +using System.Diagnostics; using System.Security.Cryptography; using System.Text; using Imazen.Abstractions.BlobCache; @@ -108,6 +109,7 @@ await ReadAllBytesIntoBitArrayAsync(stream, target, ArrayPool.Shared, public async Task Sync(IBlobCache cache, IReusableBlobFactory blobFactory, CancellationToken 
cancellationToken = default){ + var sw = Stopwatch.StartNew(); // Fetch var existingData = await FetchSync(cache, cancellationToken: cancellationToken); if (existingData != null){ @@ -116,12 +118,13 @@ public async Task Sync(IBlobCache cache, IReusableBlobFactory blobFactory, Cance } //Put var bytes = buckets.ToBytes(); + sw.Stop(); var putRequest = CacheEventDetails.CreateFreshResultGeneratedEvent(BlobCacheRequest.Value, - blobFactory, Result.Ok(new BlobWrapper( + blobFactory, Result.Ok(new BlobWrapper(null, new ReusableArraySegmentBlob(new ArraySegment(bytes), new BlobAttributes() { ContentType = "application/octet-stream" - })))); + }, sw.Elapsed)))); var putResponse = await cache.CachePut(putRequest, cancellationToken); if (putResponse.IsError) diff --git a/src/Imazen.Routing/Caching/LegacyStreamCacheAdapter.cs b/src/Imazen.Routing/Unused/LegacyStreamCacheAdapter.cs similarity index 98% rename from src/Imazen.Routing/Caching/LegacyStreamCacheAdapter.cs rename to src/Imazen.Routing/Unused/LegacyStreamCacheAdapter.cs index 387255ec..f5f62adc 100644 --- a/src/Imazen.Routing/Caching/LegacyStreamCacheAdapter.cs +++ b/src/Imazen.Routing/Unused/LegacyStreamCacheAdapter.cs @@ -98,15 +98,17 @@ public async Task GetOrCreateBytes(byte[] key, AsyncBytesRes var wrappedCallback = new Func>>(async (ct) => { + var sw = Stopwatch.StartNew(); var cacheInputEntry = await dataProviderCallback(ct); + sw.Stop(); if (cacheInputEntry == null) throw new InvalidOperationException("Null entry provided by dataProviderCallback"); if (cacheInputEntry.Bytes.Array == null) throw new InvalidOperationException("Null entry byte array provided by dataProviderCallback"); IReusableBlob blobGenerated = new ReusableArraySegmentBlob(cacheInputEntry.Bytes, new BlobAttributes(){ - ContentType = cacheInputEntry.ContentType}); - return Result.Ok(new BlobWrapper(blobGenerated)); + ContentType = cacheInputEntry.ContentType}, sw.Elapsed); + return Result.Ok(new BlobWrapper(null, blobGenerated)); }); var 
result = await GetOrCreateResult(cacheRequest, wrappedCallback, cancellationToken, retrieveContentType); diff --git a/src/NugetPackageDefaults.targets b/src/NugetPackageDefaults.targets index d6280635..013b0550 100644 --- a/src/NugetPackageDefaults.targets +++ b/src/NugetPackageDefaults.targets @@ -1,12 +1,6 @@ - enable - true net8.0;netstandard2.0;net6.0 - latest - enable - - \ No newline at end of file diff --git a/src/NugetPackages.targets b/src/NugetPackages.targets index f8bc0525..2e757643 100644 --- a/src/NugetPackages.targets +++ b/src/NugetPackages.targets @@ -1,5 +1,10 @@ - + + enable + true + latest + enable + <_Parameter1>$([System.DateTime]::UtcNow.ToString("o")) diff --git a/tests/Imazen.HybridCache.Benchmark/packages.lock.json b/tests/Imazen.HybridCache.Benchmark/packages.lock.json index aa46f338..b433581f 100644 --- a/tests/Imazen.HybridCache.Benchmark/packages.lock.json +++ b/tests/Imazen.HybridCache.Benchmark/packages.lock.json @@ -182,11 +182,11 @@ }, "System.Text.Encodings.Web": { "type": "Transitive", - "resolved": "8.0.0", - "contentHash": "yev/k9GHAEGx2Rg3/tU6MQh4HGBXJs70y7j1LaM1i/ER9po+6nnQ6RRqTJn1E7Xu0fbIFK80Nh5EoODxrbxwBQ==", + "resolved": "6.0.0", + "contentHash": "Vg8eB5Tawm1IFqj4TVK1czJX89rhFxJo9ELqc/Eiq0eXy13RK00eubyU6TJE6y+GQXjyV5gSfiewDUZjQgSE0w==", "dependencies": { "System.Buffers": "4.5.1", - "System.Memory": "4.5.5", + "System.Memory": "4.5.4", "System.Runtime.CompilerServices.Unsafe": "6.0.0" } }, @@ -201,12 +201,12 @@ "imazen.abstractions": { "type": "Project", "dependencies": { - "Microsoft.Bcl.AsyncInterfaces": "[5.0.0, )", - "Microsoft.Extensions.Hosting.Abstractions": "[2.2.0, )", - "System.Buffers": "[4.5.1, )", - "System.Memory": "[4.5.5, )", - "System.Text.Encodings.Web": "[8.0.0, )", - "System.Threading.Tasks.Extensions": "[4.5.4, )" + "Microsoft.Bcl.AsyncInterfaces": "[6.*, )", + "Microsoft.Extensions.Hosting.Abstractions": "[2.*, )", + "System.Buffers": "[4.*, )", + "System.Memory": "[4.*, )", + 
"System.Text.Encodings.Web": "[6.*, )", + "System.Threading.Tasks.Extensions": "[4.*, )" } }, "imazen.common": { @@ -369,8 +369,8 @@ }, "System.Text.Encodings.Web": { "type": "Transitive", - "resolved": "8.0.0", - "contentHash": "yev/k9GHAEGx2Rg3/tU6MQh4HGBXJs70y7j1LaM1i/ER9po+6nnQ6RRqTJn1E7Xu0fbIFK80Nh5EoODxrbxwBQ==", + "resolved": "6.0.0", + "contentHash": "Vg8eB5Tawm1IFqj4TVK1czJX89rhFxJo9ELqc/Eiq0eXy13RK00eubyU6TJE6y+GQXjyV5gSfiewDUZjQgSE0w==", "dependencies": { "System.Runtime.CompilerServices.Unsafe": "6.0.0" } @@ -378,8 +378,8 @@ "imazen.abstractions": { "type": "Project", "dependencies": { - "Microsoft.Extensions.Hosting.Abstractions": "[2.2.0, )", - "System.Text.Encodings.Web": "[8.0.0, )" + "Microsoft.Extensions.Hosting.Abstractions": "[2.*, )", + "System.Text.Encodings.Web": "[6.*, )" } }, "imazen.common": { @@ -537,19 +537,22 @@ }, "System.Runtime.CompilerServices.Unsafe": { "type": "Transitive", - "resolved": "4.5.1", - "contentHash": "Zh8t8oqolRaFa9vmOZfdQm/qKejdqz0J9kr7o2Fu0vPeoH3BL1EOXipKWwkWtLT1JPzjByrF19fGuFlNbmPpiw==" + "resolved": "6.0.0", + "contentHash": "/iUeP3tq1S0XdNNoMz5C9twLSrM/TH+qElHkXWaPvuNOt+99G75NrV0OS2EqHx5wMN7popYjpc8oTjC1y16DLg==" }, "System.Text.Encodings.Web": { "type": "Transitive", - "resolved": "8.0.0", - "contentHash": "yev/k9GHAEGx2Rg3/tU6MQh4HGBXJs70y7j1LaM1i/ER9po+6nnQ6RRqTJn1E7Xu0fbIFK80Nh5EoODxrbxwBQ==" + "resolved": "6.0.0", + "contentHash": "Vg8eB5Tawm1IFqj4TVK1czJX89rhFxJo9ELqc/Eiq0eXy13RK00eubyU6TJE6y+GQXjyV5gSfiewDUZjQgSE0w==", + "dependencies": { + "System.Runtime.CompilerServices.Unsafe": "6.0.0" + } }, "imazen.abstractions": { "type": "Project", "dependencies": { - "Microsoft.Extensions.Hosting.Abstractions": "[2.2.0, )", - "System.Text.Encodings.Web": "[8.0.0, )" + "Microsoft.Extensions.Hosting.Abstractions": "[2.*, )", + "System.Text.Encodings.Web": "[6.*, )" } }, "imazen.common": { diff --git a/tests/Imazen.HybridCache.Tests/packages.lock.json b/tests/Imazen.HybridCache.Tests/packages.lock.json index 
a703e3b1..2ef8e634 100644 --- a/tests/Imazen.HybridCache.Tests/packages.lock.json +++ b/tests/Imazen.HybridCache.Tests/packages.lock.json @@ -156,11 +156,11 @@ }, "System.Text.Encodings.Web": { "type": "Transitive", - "resolved": "8.0.0", - "contentHash": "yev/k9GHAEGx2Rg3/tU6MQh4HGBXJs70y7j1LaM1i/ER9po+6nnQ6RRqTJn1E7Xu0fbIFK80Nh5EoODxrbxwBQ==", + "resolved": "6.0.0", + "contentHash": "Vg8eB5Tawm1IFqj4TVK1czJX89rhFxJo9ELqc/Eiq0eXy13RK00eubyU6TJE6y+GQXjyV5gSfiewDUZjQgSE0w==", "dependencies": { "System.Buffers": "4.5.1", - "System.Memory": "4.5.5", + "System.Memory": "4.5.4", "System.Runtime.CompilerServices.Unsafe": "6.0.0" } }, @@ -220,12 +220,12 @@ "imazen.abstractions": { "type": "Project", "dependencies": { - "Microsoft.Bcl.AsyncInterfaces": "[5.0.0, )", - "Microsoft.Extensions.Hosting.Abstractions": "[2.2.0, )", - "System.Buffers": "[4.5.1, )", - "System.Memory": "[4.5.5, )", - "System.Text.Encodings.Web": "[8.0.0, )", - "System.Threading.Tasks.Extensions": "[4.5.4, )" + "Microsoft.Bcl.AsyncInterfaces": "[6.*, )", + "Microsoft.Extensions.Hosting.Abstractions": "[2.*, )", + "System.Buffers": "[4.*, )", + "System.Memory": "[4.*, )", + "System.Text.Encodings.Web": "[6.*, )", + "System.Threading.Tasks.Extensions": "[4.*, )" } }, "imazen.common": { @@ -1209,8 +1209,8 @@ }, "System.Text.Encodings.Web": { "type": "Transitive", - "resolved": "8.0.0", - "contentHash": "yev/k9GHAEGx2Rg3/tU6MQh4HGBXJs70y7j1LaM1i/ER9po+6nnQ6RRqTJn1E7Xu0fbIFK80Nh5EoODxrbxwBQ==", + "resolved": "6.0.0", + "contentHash": "Vg8eB5Tawm1IFqj4TVK1czJX89rhFxJo9ELqc/Eiq0eXy13RK00eubyU6TJE6y+GQXjyV5gSfiewDUZjQgSE0w==", "dependencies": { "System.Runtime.CompilerServices.Unsafe": "6.0.0" } @@ -1351,8 +1351,8 @@ "imazen.abstractions": { "type": "Project", "dependencies": { - "Microsoft.Extensions.Hosting.Abstractions": "[2.2.0, )", - "System.Text.Encodings.Web": "[8.0.0, )" + "Microsoft.Extensions.Hosting.Abstractions": "[2.*, )", + "System.Text.Encodings.Web": "[6.*, )" } }, "imazen.common": { @@ 
-2108,8 +2108,8 @@ }, "System.Runtime.CompilerServices.Unsafe": { "type": "Transitive", - "resolved": "4.5.1", - "contentHash": "Zh8t8oqolRaFa9vmOZfdQm/qKejdqz0J9kr7o2Fu0vPeoH3BL1EOXipKWwkWtLT1JPzjByrF19fGuFlNbmPpiw==" + "resolved": "6.0.0", + "contentHash": "/iUeP3tq1S0XdNNoMz5C9twLSrM/TH+qElHkXWaPvuNOt+99G75NrV0OS2EqHx5wMN7popYjpc8oTjC1y16DLg==" }, "System.Runtime.Extensions": { "type": "Transitive", @@ -2336,8 +2336,11 @@ }, "System.Text.Encodings.Web": { "type": "Transitive", - "resolved": "8.0.0", - "contentHash": "yev/k9GHAEGx2Rg3/tU6MQh4HGBXJs70y7j1LaM1i/ER9po+6nnQ6RRqTJn1E7Xu0fbIFK80Nh5EoODxrbxwBQ==" + "resolved": "6.0.0", + "contentHash": "Vg8eB5Tawm1IFqj4TVK1czJX89rhFxJo9ELqc/Eiq0eXy13RK00eubyU6TJE6y+GQXjyV5gSfiewDUZjQgSE0w==", + "dependencies": { + "System.Runtime.CompilerServices.Unsafe": "6.0.0" + } }, "System.Text.RegularExpressions": { "type": "Transitive", @@ -2475,8 +2478,8 @@ "imazen.abstractions": { "type": "Project", "dependencies": { - "Microsoft.Extensions.Hosting.Abstractions": "[2.2.0, )", - "System.Text.Encodings.Web": "[8.0.0, )" + "Microsoft.Extensions.Hosting.Abstractions": "[2.*, )", + "System.Text.Encodings.Web": "[6.*, )" } }, "imazen.common": { diff --git a/tests/Imazen.LegacyDotNetCompatTests/Imazen.LegacyDotNetCompatTests.csproj b/tests/Imazen.LegacyDotNetCompatTests/Imazen.LegacyDotNetCompatTests.csproj deleted file mode 100644 index 0e9f20e6..00000000 --- a/tests/Imazen.LegacyDotNetCompatTests/Imazen.LegacyDotNetCompatTests.csproj +++ /dev/null @@ -1,100 +0,0 @@ - - - - - Debug - AnyCPU - {8EF6CC3F-027C-486C-B30B-09773E0C7966} - {FAE04EC0-301F-11D3-BF4B-00C04F79EFBC} - Library - Properties - Imazen.LegacyDotNetCompatTests - Imazen.LegacyDotNetCompatTests - v4.8.1 - 512 - - - AnyCPU - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - AnyCPU - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - - - - - - - - ..\..\packages\xunit.abstractions.2.0.0\lib\net35\xunit.abstractions.dll - - - 
..\..\packages\xunit.assert.2.1.0\lib\dotnet\xunit.assert.dll - - - ..\..\packages\xunit.extensibility.core.2.1.0\lib\dotnet\xunit.core.dll - - - ..\..\packages\xunit.extensibility.execution.2.1.0\lib\net45\xunit.execution.desktop.dll - - - - - - - - - {8c0df651-80a0-40a0-a45f-011528370952} - Imageflow.Server.Storage.AzureBlob - - - {ca4ff20a-2b21-4fda-85bd-e3b84610483c} - Imageflow.Server.Storage.RemoteReader - - - {aa748528-5f40-42a6-aa03-e1b87343bb38} - Imageflow.Server.Storage.S3 - - - {a04b9be0-4931-4305-b9ab-b79737130f20} - Imazen.Abstractions - - - {49bddbee-3385-4b1d-b7c1-76a893f25c00} - Imazen.Common - - - {ef8e2c18-430f-4efb-940c-d6ccb6ebf5e8} - Imazen.DiskCache - - - {02128bfa-c4ac-4cc8-8417-d84c4b699803} - Imazen.HybridCache - - - {5ef0f9ea-d6de-4e34-9141-4c44da08a726} - Imazen.Routing - - - - - diff --git a/tests/Imazen.LegacyDotNetCompatTests/Properties/AssemblyInfo.cs b/tests/Imazen.LegacyDotNetCompatTests/Properties/AssemblyInfo.cs deleted file mode 100644 index 56a20d25..00000000 --- a/tests/Imazen.LegacyDotNetCompatTests/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,35 +0,0 @@ -using System.Reflection; -using System.Runtime.InteropServices; - -// General Information about an assembly is controlled through the following -// set of attributes. Change these attribute values to modify the information -// associated with an assembly. -[assembly: AssemblyTitle("Imazen.LegacyDotNetCompatTests")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("Imazen.LegacyDotNetCompatTests")] -[assembly: AssemblyCopyright("Copyright © 2023")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] - -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. If you need to access a type in this assembly from -// COM, set the ComVisible attribute to true on that type. 
-[assembly: ComVisible(false)] - -// The following GUID is for the ID of the typelib if this project is exposed to COM -[assembly: Guid("8EF6CC3F-027C-486C-B30B-09773E0C7966")] - -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values or you can default the Build and Revision Numbers -// by using the '*' as shown below: -// [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("1.0.0.0")] -[assembly: AssemblyFileVersion("1.0.0.0")] \ No newline at end of file diff --git a/tests/Imazen.LegacyDotNetCompatTests/Tests.cs b/tests/Imazen.LegacyDotNetCompatTests/Tests.cs deleted file mode 100644 index cc4b6ee7..00000000 --- a/tests/Imazen.LegacyDotNetCompatTests/Tests.cs +++ /dev/null @@ -1,16 +0,0 @@ -using Imazen.Abstractions.Blobs; -using Xunit; - -namespace Imazen.LegacyDotNetCompatTests -{ - public class Tests - { - [Fact] - public void Test1() - { - Assert.True(true); - var a = new BlobAttributes(); - - } - } -} \ No newline at end of file diff --git a/tests/Imazen.LegacyDotNetCompatTests/packages.config b/tests/Imazen.LegacyDotNetCompatTests/packages.config deleted file mode 100644 index 69a4ec43..00000000 --- a/tests/Imazen.LegacyDotNetCompatTests/packages.config +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/tests/ImazenShared.Tests/ImazenShared.Tests.csproj b/tests/ImazenShared.Tests/ImazenShared.Tests.csproj index 0fa4505e..953d075e 100644 --- a/tests/ImazenShared.Tests/ImazenShared.Tests.csproj +++ b/tests/ImazenShared.Tests/ImazenShared.Tests.csproj @@ -2,7 +2,10 @@ Imazen.Common.Tests - net8.0;net48;net6.0 + + net8.0;net48;net6.0 + + net8.0;net6.0