diff --git a/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java b/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java index f0cd1c589..40982a4b3 100644 --- a/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java +++ b/CreateSnapshot/src/main/java/org/opensearch/migrations/CreateSnapshot.java @@ -165,7 +165,7 @@ public void run() { SnapshotRunner.runAndWaitForCompletion(snapshotCreator); } } catch (Exception e) { - log.atError().setMessage("Unexpected error running RfsWorker").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected error running RfsWorker").log(); throw e; } } diff --git a/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java b/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java index 38f1e9d63..569e8b4a6 100644 --- a/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java +++ b/DocumentsFromSnapshotMigration/src/main/java/org/opensearch/migrations/RfsMigrateDocuments.java @@ -321,7 +321,7 @@ public static void main(String[] args) throws Exception { log.atWarn().setMessage("No work left to acquire. Exiting with error code to signal that.").log(); System.exit(NO_WORK_LEFT_EXIT_CODE); } catch (Exception e) { - log.atError().setMessage("Unexpected error running RfsWorker").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected error running RfsWorker").log(); throw e; } } @@ -403,18 +403,10 @@ private static void confirmShardPrepIsComplete( ); return; } catch (IWorkCoordinator.LeaseLockHeldElsewhereException e) { - long finalLockRenegotiationMillis = lockRenegotiationMillis; - int finalShardSetupAttemptNumber = shardSetupAttemptNumber; - log.atInfo() - .setMessage( - () -> "After " - + finalShardSetupAttemptNumber - + "another process holds the lock" - + " for setting up the shard work items. " - + "Waiting " - + finalLockRenegotiationMillis - + "ms before trying again." - ) + log.atInfo().setMessage("After {} attempts, another process holds the lock for setting up the shard work items." + + " Waiting {} ms before trying again.") + .addArgument(shardSetupAttemptNumber) + .addArgument(lockRenegotiationMillis) .log(); Thread.sleep(lockRenegotiationMillis); lockRenegotiationMillis *= 2; diff --git a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java index 1cad68a12..8d11f7005 100644 --- a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java +++ b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/ProcessLifecycleTest.java @@ -150,7 +150,7 @@ private void testProcess(int expectedExitCode, Function proces esSourceContainer.copySnapshotData(tempDirSnapshot.toString()); int actualExitCode = processRunner.apply(new RunData(tempDirSnapshot, tempDirLucene, proxyContainer)); - log.atInfo().setMessage("Process exited with code: " + actualExitCode).log(); + log.atInfo().setMessage("Process exited with code: {}").addArgument(actualExitCode).log(); // Check if the exit code is as expected Assertions.assertEquals( @@ -230,7 +230,9 @@ private static ProcessBuilder setupProcess( failHow == FailHow.NEVER ?
"PT10M" : "PT1S" }; // Kick off the doc migration process - log.atInfo().setMessage("Running RfsMigrateDocuments with args: " + Arrays.toString(args)).log(); + log.atInfo().setMessage("Running RfsMigrateDocuments with args: {}") + .addArgument(() -> Arrays.toString(args)) + .log(); ProcessBuilder processBuilder = new ProcessBuilder( javaExecutable, "-cp", @@ -247,7 +249,7 @@ private static ProcessBuilder setupProcess( private static Process runAndMonitorProcess(ProcessBuilder processBuilder) throws IOException { var process = processBuilder.start(); - log.atInfo().setMessage("Process started with ID: " + process.toHandle().pid()).log(); + log.atInfo().setMessage("Process started with ID: {}").addArgument(() -> process.toHandle().pid()).log(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); var readerThread = new Thread(() -> { @@ -261,7 +263,9 @@ private static Process runAndMonitorProcess(ProcessBuilder processBuilder) throw } String finalLine = line; log.atInfo() - .setMessage(() -> "from sub-process [" + process.toHandle().pid() + "]: " + finalLine) + .setMessage("from sub-process [{}]: {}") + .addArgument(() -> process.toHandle().pid()) + .addArgument(finalLine) .log(); } }); diff --git a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java index 6175b0c26..7d38cffad 100644 --- a/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java +++ b/DocumentsFromSnapshotMigration/src/test/java/org/opensearch/migrations/bulkload/SourceTestBase.java @@ -126,13 +126,8 @@ public static int migrateDocumentsSequentially( ); throw new ExpectedMigrationWorkTerminationException(e, runNumber); } catch (Exception e) { - log.atError() - .setCause(e) - .setMessage( - () -> "Caught an exception, " - + "but just going to run again with this worker to simulate task/container recycling" - ) - .log(); + log.atError().setCause(e).setMessage("Caught an exception, " + + "but just going to run again with this worker to simulate task/container recycling").log(); } } } @@ -167,7 +162,8 @@ public static DocumentsRunner.CompletionStatus migrateDocumentsWithOneWorker( var tempDir = Files.createTempDirectory("opensearchMigrationReindexFromSnapshot_test_lucene"); var shouldThrow = new AtomicBoolean(); try (var processManager = new LeaseExpireTrigger(workItemId -> { - log.atDebug().setMessage("Lease expired for " + workItemId + " making next document get throw").log(); + log.atDebug().setMessage("Lease expired for {} making next document get throw") + .addArgument(workItemId).log(); shouldThrow.set(true); })) { UnaryOperator terminatingDocumentFilter = d -> { diff --git a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java index c7661b349..42876fba9 100644 --- a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java +++ b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Evaluate.java @@ -29,13 +29,13 @@ public EvaluateResult execute(RootMetadataMigrationContext context) { var items = migrateAllItems(migrationMode, clusters, transformer, context); evaluateResult.items(items); } catch (ParameterException pe) { - log.atError().setMessage("Invalid parameter").setCause(pe).log(); + log.atError().setCause(pe).setMessage("Invalid 
parameter").log(); evaluateResult .exitCode(INVALID_PARAMETER_CODE) .errorMessage("Invalid parameter: " + pe.getMessage()) .build(); } catch (Throwable e) { - log.atError().setMessage("Unexpected failure").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected failure").log(); evaluateResult .exitCode(UNEXPECTED_FAILURE_CODE) .errorMessage("Unexpected failure: " + e.getMessage()) diff --git a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java index e09d0d76e..00edd304f 100644 --- a/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java +++ b/MetadataMigration/src/main/java/org/opensearch/migrations/commands/Migrate.java @@ -29,13 +29,13 @@ public MigrateResult execute(RootMetadataMigrationContext context) { var items = migrateAllItems(migrationMode, clusters, transformer, context); migrateResult.items(items); } catch (ParameterException pe) { - log.atError().setMessage("Invalid parameter").setCause(pe).log(); + log.atError().setCause(pe).setMessage("Invalid parameter").log(); migrateResult .exitCode(INVALID_PARAMETER_CODE) .errorMessage("Invalid parameter: " + pe.getMessage()) .build(); } catch (Throwable e) { - log.atError().setMessage("Unexpected failure").setCause(e).log(); + log.atError().setCause(e).setMessage("Unexpected failure").log(); migrateResult .exitCode(UNEXPECTED_FAILURE_CODE) .errorMessage("Unexpected failure: " + e.getMessage()) diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java index 78b780823..63a020191 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/DocumentReindexer.java @@ -65,9 +65,15 @@ BulkDocSection transformDocument(RfsLuceneDocument doc, String indexName) { Mono sendBulkRequest(UUID batchId, List docsBatch, String indexName, IDocumentReindexContext context, Scheduler scheduler) { return client.sendBulkRequest(indexName, docsBatch, context.createBulkRequest()) // Send the request - .doFirst(() -> log.atInfo().log("Batch Id:{}, {} documents in current bulk request.", batchId, docsBatch.size())) - .doOnSuccess(unused -> log.atDebug().log("Batch Id:{}, succeeded", batchId)) - .doOnError(error -> log.atError().log("Batch Id:{}, failed {}", batchId, error.getMessage())) + .doFirst(() -> log.atInfo().setMessage("Batch Id:{}, {} documents in current bulk request.") + .addArgument(batchId) + .addArgument(docsBatch::size) + .log()) + .doOnSuccess(unused -> log.atDebug().setMessage("Batch Id:{}, succeeded").addArgument(batchId).log()) + .doOnError(error -> log.atError().setMessage("Batch Id:{}, failed {}") + .addArgument(batchId) + .addArgument(error::getMessage) + .log()) // Prevent the error from stopping the entire stream, retries occurring within sendBulkRequest .onErrorResume(e -> Mono.empty()) .then() // Discard the response object diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java index 1c1537c89..3c08e190f 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReader.java @@ -119,7 +119,7 @@ Publisher 
readDocsByLeavesInParallel(DirectoryReader reader) var maxDocumentsToReadAtOnce = 100; // Arbitrary value log.atInfo().setMessage("{} documents in {} leaves found in the current Lucene index") .addArgument(reader::maxDoc) - .addArgument(reader.leaves()::size) + .addArgument(() -> reader.leaves().size()) .log(); // Create shared scheduler for i/o bound document reading @@ -157,7 +157,8 @@ protected RfsLuceneDocument getDocument(IndexReader reader, int docId, boolean i try { document = reader.document(docId); } catch (IOException e) { - log.atError().setMessage("Failed to read document at Lucene index location {}").addArgument(docId).setCause(e).log(); + log.atError().setCause(e).setMessage("Failed to read document at Lucene index location {}") + .addArgument(docId).log(); return null; } @@ -191,12 +192,14 @@ protected RfsLuceneDocument getDocument(IndexReader reader, int docId, boolean i } } if (id == null) { - log.atError().setMessage("Document with index" + docId + " does not have an id. Skipping").log(); + log.atError().setMessage("Document with index {} does not have an id. Skipping") + .addArgument(docId).log(); return null; // Skip documents with missing id } if (sourceBytes == null || sourceBytes.bytes.length == 0) { - log.atWarn().setMessage("Document {} doesn't have the _source field enabled").addArgument(id).log(); + log.atWarn().setMessage("Document {} doesn't have the _source field enabled") + .addArgument(id).log(); return null; // Skip these } @@ -205,7 +208,7 @@ protected RfsLuceneDocument getDocument(IndexReader reader, int docId, boolean i StringBuilder errorMessage = new StringBuilder(); errorMessage.append("Unable to parse Document id from Document. The Document's Fields: "); document.getFields().forEach(f -> errorMessage.append(f.name()).append(", ")); - log.atError().setMessage(errorMessage.toString()).setCause(e).log(); + log.atError().setCause(e).setMessage("{}").addArgument(errorMessage).log(); return null; // Skip documents with invalid id } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java index d043ec6ee..07aa9c5e2 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/OpenSearchClient.java @@ -355,10 +355,7 @@ public Mono sendBulkRequest(String indexName, List final var docsMap = docs.stream().collect(Collectors.toMap(d -> d.getDocId(), d -> d)); return Mono.defer(() -> { final String targetPath = indexName + "/_bulk"; - log.atTrace() - .setMessage("Creating bulk body with document ids {}") - .addArgument(() -> docsMap.keySet()) - .log(); + log.atTrace().setMessage("Creating bulk body with document ids {}").addArgument(docsMap::keySet).log(); var body = BulkDocSection.convertToBulkRequestBody(docsMap.values()); var additionalHeaders = new HashMap>(); // Reduce network bandwidth by attempting request and response compression @@ -398,9 +395,9 @@ public Mono sendBulkRequest(String indexName, List ); } else { log.atError() + .setCause(error) .setMessage("Unexpected empty document map for bulk request on index {}") .addArgument(indexName) - .setCause(error) .log(); } }); diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/S3Repo.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/S3Repo.java index 52fd7f758..be65a6cdd 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/S3Repo.java 
+++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/S3Repo.java @@ -192,7 +192,7 @@ public void prepBlobFiles(ShardMetadata shardMetadata) { + shardMetadata.getShardId() + "/"; - log.atInfo().setMessage("Downloading blob files from S3: s3://%s/%s to %s") + log.atInfo().setMessage("Downloading blob files from S3: s3://{}/{} to {}") .addArgument(s3RepoUri.bucketName) .addArgument(blobFilesS3Prefix) .addArgument(shardDirPath).log(); @@ -207,7 +207,7 @@ public void prepBlobFiles(ShardMetadata shardMetadata) { // Wait for the transfer to complete CompletedDirectoryDownload completedDirectoryDownload = directoryDownload.completionFuture().join(); - log.atInfo().setMessage(()->"Blob file download(s) complete").log(); + log.atInfo().setMessage("Blob file download(s) complete").log(); // Print out any failed downloads completedDirectoryDownload.failedTransfers().forEach(x->log.error("{}", x)); diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java index 5fd55d8a5..79b549827 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotCreator.java @@ -55,7 +55,7 @@ public void registerRepo() { client.registerSnapshotRepo(getRepoName(), settings, context); log.atInfo().setMessage("Snapshot repo registration successful").log(); } catch (Exception e) { - log.atError().setMessage("Snapshot repo registration failed").setCause(e).log(); + log.atError().setCause(e).setMessage("Snapshot repo registration failed").log(); throw new RepoRegistrationFailed(getRepoName()); } } @@ -72,7 +72,7 @@ public void createSnapshot() { client.createSnapshot(getRepoName(), snapshotName, body, context); log.atInfo().setMessage("Snapshot {} creation initiated").addArgument(snapshotName).log(); } catch (Exception e) { - log.atError().setMessage("Snapshot {} creation failed").addArgument(snapshotName).setCause(e).log(); + log.atError().setCause(e).setMessage("Snapshot {} creation failed").addArgument(snapshotName).log(); throw new SnapshotCreationFailed(snapshotName); } } @@ -82,7 +82,7 @@ public boolean isSnapshotFinished() { try { response = client.getSnapshotStatus(getRepoName(), snapshotName, context); } catch (Exception e) { - log.atError().setMessage("Failed to get snapshot status").setCause(e).log(); + log.atError().setCause(e).setMessage("Failed to get snapshot status").log(); throw new SnapshotStatusCheckFailed(snapshotName); } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotShardUnpacker.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotShardUnpacker.java index 294b93c75..4c2aa73ba 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotShardUnpacker.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/SnapshotShardUnpacker.java @@ -51,8 +51,9 @@ public Path unpack() { try (FSDirectory primaryDirectory = FSDirectory.open(luceneIndexDir, lockFactory)) { for (ShardFileInfo fileMetadata : shardMetadata.getFiles()) { log.atInfo().setMessage("Unpacking - Blob Name: {}, Lucene Name: {}") - .addArgument(fileMetadata.getName()) - .addArgument(fileMetadata.getPhysicalName()).log(); + .addArgument(fileMetadata::getName) + .addArgument(fileMetadata::getPhysicalName) + .log(); try ( IndexOutput indexOutput = primaryDirectory.createOutput( fileMetadata.getPhysicalName(), diff --git 
a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java index f3c6744a7..d14ea198d 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/common/http/GzipPayloadRequestTransformer.java @@ -93,10 +93,8 @@ private ByteBuffer gzipByteBufferSimple(final ByteBuffer inputBuffer) { } } if (inputBuffer.remaining() > 0) { - log.atDebug() - .setMessage("Gzip compression ratio: {}") - .addArgument(() -> String.format("%.2f%%", (double) baos.size() / inputBuffer.remaining() * 100)) - .log(); + log.atDebug().setMessage("Gzip compression ratio: {}") + .addArgument(() -> String.format("%.2f%%", (double) baos.size() / inputBuffer.remaining() * 100)).log(); } return ByteBuffer.wrap(baos.toByteArray()); } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java index a4005752c..5a2c4325b 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_6_8_to_OS_2_11.java @@ -67,7 +67,7 @@ public IndexMetadata transformIndexMetadata(IndexMetadata index) { } private void transformIndex(Index index, IndexType type) { - log.atDebug().setMessage(()->"Original Object: {}").addArgument(index.getRawJson().toString()).log(); + log.atDebug().setMessage("Original Object: {}").addArgument(index::getRawJson).log(); var newRoot = index.getRawJson(); switch (type) { @@ -85,7 +85,7 @@ private void transformIndex(Index index, IndexType type) { TransformFunctions.removeIntermediateIndexSettingsLevel(newRoot); // run before fixNumberOfReplicas TransformFunctions.fixReplicasForDimensionality(newRoot, awarenessAttributeDimensionality); - log.atDebug().setMessage(()->"Transformed Object: {}").addArgument(newRoot.toString()).log(); + log.atDebug().setMessage("Transformed Object: {}").addArgument(newRoot).log(); } private enum IndexType { diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java index 48c9366a2..036303dd6 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/transformers/Transformer_ES_7_10_OS_2_11.java @@ -86,7 +86,7 @@ public GlobalMetadata transformGlobalMetadata(GlobalMetadata metaData) { @Override public IndexMetadata transformIndexMetadata(IndexMetadata indexData) { - log.atDebug().setMessage("Original Object: {}").addArgument(indexData.getRawJson()).log(); + log.atDebug().setMessage("Original Object: {}").addArgument(indexData::getRawJson).log(); var copy = indexData.deepCopy(); var newRoot = copy.getRawJson(); diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java index a1838ba2b..9ed64b302 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java +++ 
b/RFS/src/main/java/org/opensearch/migrations/bulkload/version_universal/RemoteReaderClient.java @@ -43,10 +43,7 @@ public ObjectNode getClusterData() { assert responses != null; var globalMetadata = globalMetadataFromParts(responses); - log.atDebug() - .setMessage("Combined global metadata:\n{}") - .addArgument(globalMetadata::toString) - .log(); + log.atDebug().setMessage("Combined global metadata:\n{}").addArgument(globalMetadata).log(); return globalMetadata; } @@ -81,10 +78,7 @@ public ObjectNode getIndexes() { .block(); var indexData = combineIndexDetails(indexDetailsList); - log.atDebug() - .setMessage("Index data combined:\n{}") - .addArgument(indexData::toString) - .log(); + log.atDebug().setMessage("Index data combined:\n{}").addArgument(indexData).log(); return indexData; } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java index ec9e91f92..6c8106b86 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoordinator.java @@ -156,8 +156,9 @@ public void setup(Supplier - "Creating " + INDEX_NAME + " because HEAD returned " + indexCheckResponse.getStatusCode()) + log.atInfo().setMessage("Creating {} because HEAD returned {}") + .addArgument(INDEX_NAME) + .addArgument(indexCheckResponse::getStatusCode) .log(); return httpClient.makeJsonRequest(AbstractedHttpClient.PUT_METHOD, INDEX_NAME, null, body); } catch (Exception e) { @@ -277,18 +278,21 @@ DocumentModificationResult getResult(AbstractedHttpClient.AbstractHttpResponse r try { return DocumentModificationResult.parse(resultStr); } catch (Exception e) { - log.atWarn().setCause(e).setMessage(() -> "Caught exception while parsing the response").log(); - log.atWarn().setMessage(() -> "status: " + response.getStatusCode() + " " + response.getStatusText()).log(); - log.atWarn().setMessage(() -> "headers: " + - response.getHeaders() - .map(kvp->kvp.getKey() + ":" + kvp.getValue()) - .collect(Collectors.joining("\n"))) + log.atWarn().setCause(e).setMessage("Caught exception while parsing the response").log(); + log.atWarn().setMessage("status: {} {}") + .addArgument(response::getStatusCode) + .addArgument(response::getStatusText) .log(); - log.atWarn().setMessage(() -> { + log.atWarn().setMessage("headers: {}") + .addArgument(() -> response.getHeaders() + .map(kvp->kvp.getKey() + ":" + kvp.getValue()).collect(Collectors.joining("\n"))) + .log(); + log.atWarn().setMessage("Payload: {}") + .addArgument(() -> { try { - return "Payload: " + new String(response.getPayloadBytes(), StandardCharsets.UTF_8); + return new String(response.getPayloadBytes(), StandardCharsets.UTF_8); } catch (Exception e2) { - return "while trying to display response bytes, caught exception: " + e2; + return "EXCEPTION: while trying to display response bytes: " + e2; } } ) @@ -592,12 +596,12 @@ private WorkItemAndDuration getAssignedWorkItemUnsafe() var resultHitInner = resultHitsUpper.path("hits").path(0); var expiration = resultHitInner.path(SOURCE_FIELD_NAME).path(EXPIRATION_FIELD_NAME).longValue(); if (expiration == 0) { - log.atWarn().setMessage(() -> - "Expiration wasn't found or wasn't set to > 0 for response:" + response.toDiagnosticString()).log(); + log.atWarn().setMessage("Expiration wasn't found or wasn't set to > 0 for response: {}") + 
.addArgument(response::toDiagnosticString).log(); throw new MalformedAssignedWorkDocumentException(response); } var rval = new WorkItemAndDuration(resultHitInner.get("_id").asText(), Instant.ofEpochMilli(1000 * expiration)); - log.atInfo().setMessage(() -> "Returning work item and lease: " + rval).log(); + log.atInfo().setMessage("Returning work item and lease: {}").addArgument(rval).log(); return rval; } @@ -617,7 +621,7 @@ private WorkItemAndDuration getAssignedWorkItem(LeaseChecker leaseChecker, // there's no reason to not try at least a few times if (malformedDocRetries > MAX_MALFORMED_ASSIGNED_WORK_DOC_RETRIES) { ctx.addTraceException(e, true); - log.atError().setCause(e).setMessage(() -> + log.atError().setCause(e).setMessage( "Throwing exception because max tries (" + MAX_MALFORMED_ASSIGNED_WORK_DOC_RETRIES + ")" + " have been exhausted").log(); throw new RetriesExceededException(e, malformedDocRetries); @@ -633,8 +637,9 @@ private WorkItemAndDuration getAssignedWorkItem(LeaseChecker leaseChecker, (long) (Math.pow(2.0, (retries-1)) * ACQUIRE_WORK_RETRY_BASE_MS))); leaseChecker.checkRetryWaitTimeOrThrow(e, retries-1, sleepBeforeNextRetryDuration); - log.atWarn().setMessage(() -> "Couldn't complete work assignment due to exception. " - + "Backing off " + sleepBeforeNextRetryDuration + "ms and trying again.").setCause(e).log(); + log.atWarn().setCause(e) + .setMessage("Couldn't complete work assignment due to exception. Backing off {} and trying again.") + .addArgument(sleepBeforeNextRetryDuration).log(); Thread.sleep(sleepBeforeNextRetryDuration.toMillis()); } } @@ -706,16 +711,10 @@ static U doUntil( if (test.test(suppliedVal, transformedVal)) { return transformedVal; } else { - log.atWarn() - .setMessage( - () -> "Retrying " - + labelThatShouldBeAContext - + " because the predicate failed for: (" - + suppliedVal - + "," - + transformedVal - + ")" - ) + log.atWarn().setMessage("Retrying {} because the predicate failed for: ({},{})") + .addArgument(labelThatShouldBeAContext) + .addArgument(suppliedVal) + .addArgument(transformedVal) .log(); if (attempt >= maxTries) { context.recordFailure(); @@ -802,8 +801,9 @@ private void refresh(Supplier context leaseChecker.checkRetryWaitTimeOrThrow(e, driftRetries, sleepBeforeNextRetryDuration); } ++driftRetries; - log.atInfo().setCause(e).setMessage(() -> "Couldn't complete work assignment due to exception. " - + "Backing off " + sleepBeforeNextRetryDuration + "ms and trying again.").log(); + log.atInfo().setCause(e) + .setMessage("Couldn't complete work assignment due to exception. 
Backing off {} and retrying.") + .addArgument(sleepBeforeNextRetryDuration).log(); Thread.sleep(sleepBeforeNextRetryDuration.toMillis()); } } diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java index 958b5d3e6..9dff2b2f1 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/DocumentsRunner.java @@ -101,13 +101,9 @@ private void doDocumentsMigration( reindexer.reindex(shardMetadata.getIndexName(), documents, context) .doOnError(error -> log.error("Error during reindexing: " + error)) .doOnSuccess( - done -> log.atInfo() - .setMessage( - () -> "Reindexing completed for Index " - + shardMetadata.getIndexName() - + ", Shard " - + shardMetadata.getShardId() - ) + done -> log.atInfo().setMessage("Reindexing completed for Index {}, Shard {}") + .addArgument(shardMetadata::getIndexName) + .addArgument(shardMetadata::getShardId) .log() ) // Wait for the reindexing to complete before proceeding diff --git a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java index 7e79418ae..f437ad65a 100644 --- a/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java +++ b/RFS/src/main/java/org/opensearch/migrations/bulkload/worker/ShardWorkPreparer.java @@ -70,7 +70,7 @@ public Void onAlreadyCompleted() throws IOException { @Override public Void onAcquiredWork(IWorkCoordinator.WorkItemAndDuration workItem) { - log.atInfo().setMessage(() -> "Acquired work to set the shard workitems").log(); + log.atInfo().setMessage("Acquired work to set the shard workitems").log(); prepareShardWorkItems( scopedWorkCoordinator.workCoordinator, metadataFactory, diff --git a/RFS/src/main/java/org/opensearch/migrations/reindexer/FailedRequestsLogger.java b/RFS/src/main/java/org/opensearch/migrations/reindexer/FailedRequestsLogger.java index d8c6e71b3..53f73e25d 100644 --- a/RFS/src/main/java/org/opensearch/migrations/reindexer/FailedRequestsLogger.java +++ b/RFS/src/main/java/org/opensearch/migrations/reindexer/FailedRequestsLogger.java @@ -32,7 +32,7 @@ public void logBulkFailure( ) .addArgument(indexName) .addArgument(failedItemCounter::getAsInt) - .addArgument(rootCause.getMessage()) + .addArgument(rootCause::getMessage) .addArgument(bulkRequestBodySupplier) .addArgument(responseBody::get) .log(); diff --git a/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java b/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java index d430f7fe2..eea8094bd 100644 --- a/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java +++ b/RFS/src/test/java/org/opensearch/migrations/bulkload/common/LuceneDocumentsReaderTest.java @@ -53,7 +53,7 @@ public class LuceneDocumentsReaderTest { @BeforeEach public void setUp() throws IOException { tempDirectory = Files.createTempDirectory("test-temp-dir"); - log.atDebug().log("Temporary directory created at: " + tempDirectory); + log.atDebug().setMessage("Temporary directory created at: {}").addArgument(tempDirectory).log(); } @AfterEach @@ -64,7 +64,7 @@ public void tearDown() throws IOException { try { Files.delete(path); } catch (IOException e) { - log.atError().setMessage("Failed to delete: " + path).setCause(e).log(); + 
log.atError().setCause(e).setMessage("Failed to delete: {}").addArgument(path).log(); } }); log.atDebug().log("Temporary directory deleted."); diff --git a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java index a851ffb14..bf1de5422 100644 --- a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java +++ b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/OpenSearchWorkCoodinatorTest.java @@ -101,7 +101,7 @@ public void testWhenGetResultAndErrorThenLogged() throws Exception { var closeableLogSetup = new CloseableLogSetup(workCoordinator.getClass().getName())) { Assertions.assertThrows(IllegalArgumentException.class, () -> workCoordinator.getResult(response)); - log.atDebug().setMessage(()->"Logged events: " + closeableLogSetup.getLogEvents()).log(); + log.atDebug().setMessage("Logged events: {}").addArgument(closeableLogSetup::getLogEvents).log(); Assertions.assertTrue(closeableLogSetup.getLogEvents().stream().anyMatch(e -> e.contains(THROTTLE_RESULT_VALUE))); } } @@ -124,7 +124,7 @@ public void testWhenInvokedWithHttpErrorThenLogged(Function "Logged events: " + closeableLogSetup.getLogEvents()).log(); + log.atDebug().setMessage("Logged events: {}").addArgument(closeableLogSetup::getLogEvents).log(); var logEvents = closeableLogSetup.getLogEvents(); Assertions.assertTrue(logEvents.stream().anyMatch(e -> e.contains(THROTTLE_RESULT_VALUE))); } diff --git a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java index 83263f4ed..39edd9a62 100644 --- a/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java +++ b/RFS/src/test/java/org/opensearch/migrations/bulkload/workcoordination/WorkCoordinatorTest.java @@ -68,7 +68,7 @@ private JsonNode searchForExpiredDocs(long expirationEpochSeconds) { "" + expirationEpochSeconds ) + "}"; - log.atInfo().setMessage(() -> "Searching with... " + body).log(); + log.atInfo().setMessage("Searching with... {}").addArgument(body).log(); var response = httpClientSupplier.get() .makeJsonRequest( AbstractedHttpClient.GET_METHOD, @@ -162,15 +162,11 @@ public void testAcquireLeaseForQuery() throws Exception { Duration.ofSeconds(2), testContext::createAcquireNextItemContext ); - log.atInfo().setMessage(() -> "Next work item picked=" + nextWorkItem).log(); + log.atInfo().setMessage("Next work item picked={}").addArgument(nextWorkItem).log(); Assertions.assertInstanceOf(IWorkCoordinator.NoAvailableWorkToBeDone.class, nextWorkItem); } catch (OpenSearchWorkCoordinator.PotentialClockDriftDetectedException e) { - log.atError() - .setCause(e) - .setMessage( - () -> "Unexpected clock drift error. Got response: " - + searchForExpiredDocs(e.getTimestampEpochSeconds()) - ) + log.atError().setCause(e).setMessage("Unexpected clock drift error. 
Got response: {}") + .addArgument(() -> searchForExpiredDocs(e.getTimestampEpochSeconds())) .log(); } @@ -224,7 +220,7 @@ public String onNoAvailableWorkToBeDone() throws IOException { @Override public String onAcquiredWork(IWorkCoordinator.WorkItemAndDuration workItem) throws IOException, InterruptedException { - log.atInfo().setMessage(() -> "Next work item picked=" + workItem).log(); + log.atInfo().setMessage("Next work item picked={}").addArgument(workItem).log(); Assertions.assertNotNull(workItem); Assertions.assertNotNull(workItem.workItemId); Assertions.assertTrue(workItem.leaseExpirationTime.isAfter(oldNow)); @@ -243,10 +239,8 @@ public String onAcquiredWork(IWorkCoordinator.WorkItemAndDuration workItem) thro } catch (OpenSearchWorkCoordinator.PotentialClockDriftDetectedException e) { log.atError() .setCause(e) - .setMessage( - () -> "Unexpected clock drift error. Got response: " - + searchForExpiredDocs(e.getTimestampEpochSeconds()) - ) + .setMessage("Unexpected clock drift error. Got response: {}") + .addArgument(() -> searchForExpiredDocs(e.getTimestampEpochSeconds())) .log(); throw e; } diff --git a/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java b/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java index 6f977f259..e3a2c13c9 100644 --- a/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java +++ b/RFS/src/testFixtures/java/org/opensearch/migrations/bulkload/framework/PreloadedDataContainerOrchestrator.java @@ -106,17 +106,12 @@ private int getHashCodeOfImagesAndArgs(DockerClient dockerClient, boolean pullIf var sourceImageId = getImageId(dockerClient, baseSourceVersion.imageName, pullIfUnavailable); var dataLoaderImageId = getImageId(dockerClient, dataLoaderImageName, pullIfUnavailable); var rval = Objects.hash(sourceImageId, dataLoaderImageId, Arrays.hashCode(generatorContainerArgs)); - log.atInfo() - .setMessage( - "sourceImageId=" - + sourceImageId - + " dataLoaderImageId=" - + dataLoaderImageId - + " args=" - + Arrays.stream(generatorContainerArgs).collect(Collectors.joining()) - + " hash: " - + rval - ) + log.atInfo().setMessage("{}") + .addArgument(() -> + "sourceImageId=" + sourceImageId + + " dataLoaderImageId=" + dataLoaderImageId + + " args=" + Arrays.stream(generatorContainerArgs).collect(Collectors.joining()) + + " hash: " + rval) .log(); return rval; } diff --git a/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java b/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java index 38122b18e..9bb7a2b2a 100644 --- a/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java +++ b/TrafficCapture/captureOffloader/src/test/java/org/opensearch/migrations/trafficcapture/StreamChannelConnectionCaptureSerializerTest.java @@ -743,15 +743,13 @@ protected CompletableFuture kickoffCloseStream(CodedOutputStreamHolder out return CompletableFuture.runAsync(() -> { try { osh.getOutputStream().flush(); - log.atTrace().setMessage("Just flushed for {}").addArgument(osh.getOutputStream()).log(); + log.atTrace().setMessage("Just flushed for {}").addArgument(osh::getOutputStream).log(); var bb = osh.getByteBuffer(); bb.position(0); var bytesWritten = 
osh.getOutputStream().getTotalBytesWritten(); bb.limit(bytesWritten); - log.atTrace() - .setMessage("Adding {}") - .addArgument(() -> StandardCharsets.UTF_8.decode(bb.duplicate())) - .log(); + log.atTrace().setMessage("Adding {}") + .addArgument(() -> StandardCharsets.UTF_8.decode(bb.duplicate())).log(); outputBuffers.add(bb); } catch (IOException e) { throw Lombok.sneakyThrow(e); diff --git a/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java b/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java index d914be5bc..efd6ae668 100644 --- a/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java +++ b/TrafficCapture/nettyWireLogging/src/main/java/org/opensearch/migrations/trafficcapture/netty/ConditionallyReliableLoggingHttpHandler.java @@ -41,17 +41,15 @@ protected void channelFinishedReadingAnHttpMessage( ((IWireCaptureContexts.IRequestContext) messageContext).onBlockingRequest(); messageContext = messageContext.createBlockingContext(); trafficOffloader.flushCommitAndResetStream(false).whenComplete((result, t) -> { - log.atInfo().setMessage(() -> "Done flushing").log(); + log.atInfo().setMessage("Done flushing").log(); if (t != null) { // This is a spot where we would benefit from having a behavioral policy that different users // could set as needed. Some users may be fine with just logging a failed offloading of a request // where other users may want to stop entirely. JIRA here: // https://opensearch.atlassian.net/browse/MIGRATIONS-1276 - log.atWarn() - .setCause(t) - .setMessage("Error offloading the request, but forwarding it to the service anyway") - .log(); + log.atWarn().setCause(t) + .setMessage("Error offloading the request, but forwarding it to the service anyway").log(); ReferenceCountUtil.release(msg); messageContext.addCaughtException(t); } diff --git a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java index 3c243a80a..bbd39973b 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideConnectionPool.java @@ -75,7 +75,7 @@ private void logProgressAtInterval( Duration frequency ) { eventLoop.scheduleAtFixedRate( - () -> log.atLevel(logLevel).log(channelPoolMap.getStats().toString()), + () -> log.atLevel(logLevel).setMessage("{}").addArgument(channelPoolMap::getStats).log(), frequency.toMillis(), frequency.toMillis(), TimeUnit.MILLISECONDS diff --git a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java index eec143603..5d78140c7 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java +++ 
b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/BacksideHandler.java @@ -38,7 +38,9 @@ public void channelInactive(ChannelHandlerContext ctx) { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - log.atError().setCause(cause).setMessage("Caught error for channel: " + ctx.channel().id().asLongText()).log(); + log.atError().setCause(cause).setMessage("Caught error for channel: {}") + .addArgument(() -> ctx.channel().id().asLongText()) + .log(); FrontsideHandler.closeAndFlush(ctx.channel()); } } diff --git a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java index f35c632e8..374e30a86 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/ExpiringSubstitutableItemPool.java @@ -210,11 +210,16 @@ public Stats getStats() { // make a copy on the original thread making changes, which will be up to date at the time of capture and // immutable for future accessors, making it thread-safe var copiedStats = eventLoop.submit(() -> { - log.atTrace().setMessage(() -> "copying stats (" + System.identityHashCode(stats) + ")=" + stats).log(); + log.atTrace().setMessage("copying stats ({})={}") + .addArgument(() -> System.identityHashCode(stats)) + .addArgument(stats) + .log(); return new Stats(stats); }).get(); log.atTrace() - .setMessage(() -> "Got copied value of (" + System.identityHashCode(copiedStats) + ")=" + copiedStats) + .setMessage("Got copied value of ({})={}") + .addArgument(() -> System.identityHashCode(copiedStats)) + .addArgument(copiedStats) .log(); return copiedStats; } @@ -234,10 +239,10 @@ public F getAvailableOrNewItem() { throw new PoolClosedException(); } var startTime = Instant.now(); - log.atTrace().setMessage("getAvailableOrNewItem: readyItems.size()={}").addArgument(readyItems.size()).log(); + log.atTrace().setMessage("getAvailableOrNewItem: readyItems.size()={}").addArgument(readyItems::size).log(); var item = readyItems.poll(); log.atTrace().setMessage("getAvailableOrNewItem: item={} remaining readyItems.size()={}") - .addArgument(item).addArgument(readyItems.size()).log(); + .addArgument(item).addArgument(readyItems::size).log(); if (item != null) { stats.addHotGet(); beginLoadingNewItemIfNecessary(); diff --git a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java index 72ca5c837..78f9d1e64 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/TestHeaderRewrites.java @@ -97,7 +97,7 @@ public void testBodyDoesntRewrite() throws Exception { fullRequest -> { var request = new SimpleNettyHttpServer.RequestToAdapter(fullRequest); capturedRequestList.add(request); - log.atTrace().setMessage(() -> "headers: 
" + + log.atTrace().setMessage("headers: {}").addArgument(() -> request.getHeaders().stream().map(kvp->kvp.getKey()+": "+kvp.getValue()) .collect(Collectors.joining())).log(); capturedBodies.add(fullRequest.content().toString(StandardCharsets.UTF_8)); diff --git a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java index 99ccf6ba2..111314998 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderAdderHandlerTest.java @@ -70,7 +70,7 @@ public static void sliceMessageIntoChannelWrites(EmbeddedChannel channel, String var endIdx = startIdx + len; var substr = msg.substring(lastStart.get(), Math.min(endIdx, msg.length())); lastStart.set(endIdx); - log.atTrace().setMessage(() -> "s: " + substr).log(); + log.atTrace().setMessage("s: {}").addArgument(substr).log(); return substr; }) .takeWhile(Objects::nonNull) diff --git a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java index 73480a0d3..de3001e51 100644 --- a/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java +++ b/TrafficCapture/trafficCaptureProxyServer/src/test/java/org/opensearch/migrations/trafficcapture/proxyserver/netty/HeaderRemoverHandlerTest.java @@ -26,9 +26,9 @@ class HeaderRemoverHandlerTest { private static final int NUM_RANDOM_RUNS = 1_000; private void runTestsWithSize(BiFunction msgMaker, Supplier sizesSupplier) { - log.atDebug().setMessage(() -> - "sizes: " + sizesSupplier.get().limit(16).mapToObj(i->""+i) - .collect(Collectors.joining(","))).log(); + log.atDebug().setMessage("sizes: {}") + .addArgument(() -> sizesSupplier.get().limit(16).mapToObj(i->""+i).collect(Collectors.joining(","))) + .log(); runTestWithSize(b -> msgMaker.apply(b,"\n"), sizesSupplier.get()); runTestWithSize(b -> msgMaker.apply(b, "\r\n"), sizesSupplier.get()); } @@ -92,7 +92,7 @@ public void randomFragmentedCheckInterlaced() { final var bound = getBound(HeaderRemoverHandlerTest::makeInterlacedMessage); for (int i=0; i "random run={}").addArgument(i).log(); + log.atDebug().setMessage("random run={}").addArgument(i).log(); runTestsWithSize(HeaderRemoverHandlerTest::makeInterlacedMessage, () -> IntStream.generate(() -> r.nextInt(bound))); } @@ -113,7 +113,7 @@ public void randomFragmentedCheckConsecutive() { final var bound = getBound(HeaderRemoverHandlerTest::makeConsecutiveMessage); for (int i=0; i "random run={}").addArgument(i).log(); + log.atDebug().setMessage("random run={}").addArgument(i).log(); runTestsWithSize(HeaderRemoverHandlerTest::makeConsecutiveMessage, () -> IntStream.generate(() -> r.nextInt(bound))); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java 
b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java index 0d07d5a83..9adc56942 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/CapturedTrafficToHttpTransactionAccumulator.java @@ -85,13 +85,9 @@ public String appendageToDescribeHowToSetMinimumGuaranteedLifetime() { @Override public void onExpireAccumulation(String partitionId, Accumulation accumulation) { connectionsExpiredCounter.incrementAndGet(); - log.atTrace() - .setMessage( - () -> "firing accumulation for accum=[" - + accumulation.getRrPair().getBeginningTrafficStreamKey() - + "]=" - + accumulation - ) + log.atTrace().setMessage("firing accumulation for accum=[{}]={}") + .addArgument(() -> accumulation.getRrPair().getBeginningTrafficStreamKey()) + .addArgument(accumulation) .log(); fireAccumulationsCallbacksAndClose( accumulation, @@ -194,8 +190,8 @@ private static String summarizeTrafficStream(TrafficStream ts) { public void accept(ITrafficStreamWithKey trafficStreamAndKey) { var yetToBeSequencedTrafficStream = trafficStreamAndKey.getStream(); - log.atTrace() - .setMessage(() -> "Got trafficStream: " + summarizeTrafficStream(yetToBeSequencedTrafficStream)) + log.atTrace().setMessage("Got trafficStream: {}") + .addArgument(() -> summarizeTrafficStream(yetToBeSequencedTrafficStream)) .log(); var partitionId = yetToBeSequencedTrafficStream.getNodeId(); var connectionId = yetToBeSequencedTrafficStream.getConnectionId(); @@ -206,14 +202,9 @@ public void accept(ITrafficStreamWithKey trafficStreamAndKey) { var o = trafficStream.getSubStreamList().get(i); var connectionStatus = addObservationToAccumulation(accum, tsk, o); if (CONNECTION_STATUS.CLOSED == connectionStatus) { - log.atInfo() - .setMessage( - () -> "Connection terminated: removing " - + partitionId - + ":" - + connectionId - + " from liveStreams map" - ) + log.atInfo().setMessage("Connection terminated: removing {}:{} from liveStreams map") + .addArgument(partitionId) + .addArgument(connectionId) .log(); liveStreams.remove(partitionId, connectionId); break; @@ -236,19 +227,14 @@ private Accumulation createInitialAccumulation(ITrafficStreamWithKey streamWithK if (key.getTrafficStreamIndex() == 0 && (stream.getPriorRequestsReceived() > 0 || stream.getLastObservationWasUnterminatedRead())) { log.atWarn() - .setMessage( - () -> "Encountered a TrafficStream object with inconsistent values between " - + "the prior request count (" - + stream.getPriorRequestsReceived() - + ", " - + "lastObservationWasUnterminatedRead (" - + stream.getLastObservationWasUnterminatedRead() - + ") and the index (" - + key.getTrafficStreamIndex() - + "). Traffic Observations will be ignored until Reads after the next EndOfMessage" - + " are encountered. Full stream object=" - + stream - ) + .setMessage("Encountered a TrafficStream object with inconsistent values between " + "the prior request count ({}), lastObservationWasUnterminatedRead ({}), and the index ({}). " + "Traffic Observations will be ignored until Reads after the next EndOfMessage" + " are encountered. 
Full stream object={}") + .addArgument(stream::getPriorRequestsReceived) + .addArgument(stream::getLastObservationWasUnterminatedRead) + .addArgument(key::getTrafficStreamIndex) + .addArgument(stream) .log(); } @@ -265,9 +251,9 @@ public CONNECTION_STATUS addObservationToAccumulation( @NonNull ITrafficStreamKey trafficStreamKey, TrafficObservation observation ) { - log.atTrace() - .setMessage("{}") - .addArgument(() -> "Adding observation: " + observation + " with state=" + accum.state) + log.atTrace().setMessage("Adding observation: {} with state={}") + .addArgument(observation) + .addArgument(accum.state) .log(); var timestamp = TrafficStreamUtils.instantFromProtoTimestamp(observation.getTs()); liveStreams.expireOldEntries(trafficStreamKey, accum, timestamp); @@ -278,10 +264,9 @@ public CONNECTION_STATUS addObservationToAccumulation( .or(() -> handleObservationForReadState(accum, observation, trafficStreamKey, timestamp)) .or(() -> handleObservationForWriteState(accum, observation, trafficStreamKey, timestamp)) .orElseGet(() -> { - log.atWarn() - .setMessage( - () -> "unaccounted for observation type " + observation + " for " + accum.trafficChannelKey - ) + log.atWarn().setMessage("unaccounted for observation type {} for {}") + .addArgument(observation) + .addArgument(accum.trafficChannelKey) .log(); return CONNECTION_STATUS.ALIVE; }); @@ -342,13 +327,10 @@ private Optional handleCloseObservationThatAffectEveryState( exceptionConnectionCounter.incrementAndGet(); accum.resetForNextRequest(); log.atDebug() - .setMessage( - () -> "Removing accumulated traffic pair due to " - + "recorded connection exception event for " - + trafficStreamKey.getConnectionId() - ) + .setMessage("Removing accumulated traffic pair due to recorded connection exception event for {}") + .addArgument(trafficStreamKey::getConnectionId) .log(); - log.atTrace().setMessage(() -> "Accumulated object: " + accum).log(); + log.atTrace().setMessage("Accumulated object: {}").addArgument(accum).log(); return Optional.of(CONNECTION_STATUS.ALIVE); } return Optional.empty(); @@ -371,14 +353,22 @@ private Optional handleObservationForReadState( requestCounter.incrementAndGet(); } var rrPair = accum.getOrCreateTransactionPair(trafficStreamKey, originTimestamp); - log.atTrace().setMessage(() -> "Adding request data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Adding request data for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum).log(); rrPair.addRequestData(timestamp, observation.getRead().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added request data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Added request data for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasEndOfMessageIndicator()) { assert accum.hasRrPair(); handleEndOfRequest(accum); } else if (observation.hasReadSegment()) { - log.atTrace().setMessage(() -> "Adding request segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Adding request segment for accum[{}]={}") + .addArgument(connectionId). 
+ addArgument(accum) + .log(); var rrPair = accum.getOrCreateTransactionPair(trafficStreamKey, originTimestamp); if (rrPair.requestData == null) { rrPair.requestData = new HttpMessageAndTimestamp.Request(timestamp); @@ -386,7 +376,10 @@ private Optional handleObservationForReadState( } rrPair.addRequestData(timestamp, observation.getRead().getData().toByteArray()); rrPair.requestData.addSegment(observation.getReadSegment().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added request segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Added request segment for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasSegmentEnd()) { var rrPair = accum.getRrPair(); assert rrPair.requestData.hasInProgressSegment(); @@ -413,17 +406,29 @@ private Optional handleObservationForWriteState( var connectionId = trafficStreamKey.getConnectionId(); if (observation.hasWrite()) { var rrPair = accum.getRrPair(); - log.atTrace().setMessage(() -> "Adding response data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Adding response data for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); rrPair.addResponseData(timestamp, observation.getWrite().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added response data for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Added response data for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasWriteSegment()) { - log.atTrace().setMessage(() -> "Adding response segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Adding response segment for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); var rrPair = accum.getRrPair(); if (rrPair.responseData == null) { rrPair.responseData = new HttpMessageAndTimestamp.Response(timestamp); } rrPair.responseData.addSegment(observation.getWriteSegment().getData().toByteArray()); - log.atTrace().setMessage(() -> "Added response segment for accum[" + connectionId + "]=" + accum).log(); + log.atTrace().setMessage("Added response segment for accum[{}]={}") + .addArgument(connectionId) + .addArgument(accum) + .log(); } else if (observation.hasSegmentEnd()) { var rrPair = accum.getRrPair(); assert rrPair.responseData.hasInProgressSegment(); @@ -435,7 +440,6 @@ private Optional handleObservationForWriteState( return Optional.empty(); } return Optional.of(CONNECTION_STATUS.ALIVE); - } private void handleDroppedRequestForAccumulation(Accumulation accum) { @@ -443,9 +447,9 @@ private void handleDroppedRequestForAccumulation(Accumulation accum) { var rrPair = accum.getRrPair(); rrPair.getTrafficStreamsHeld().forEach(listener::onTrafficStreamIgnored); } - log.atTrace().setMessage(() -> "resetting to forget " + accum.trafficChannelKey).log(); + log.atTrace().setMessage("resetting to forget {}").addArgument(accum.trafficChannelKey).log(); accum.resetToIgnoreAndForgetCurrentRequest(); - log.atTrace().setMessage(() -> "done resetting to forget and accum=" + accum).log(); + log.atTrace().setMessage("done resetting to forget and accum={}").addArgument(accum).log(); } // This function manages the transition case when an observation comes in that would terminate @@ -455,7 +459,7 @@ private boolean rotateAccumulationIfNecessary(String connectionId, Accumulation // If this was brand new, we don't need to care about triggering the callback. 
// We only need to worry about this if we have yet to send the RESPONSE. if (accum.state == Accumulation.State.ACCUMULATING_WRITES) { - log.atDebug().setMessage(() -> "handling EOM for accum[" + connectionId + "]=" + accum).log(); + log.atDebug().setMessage("handling EOM for accum[{}]={}").addArgument(connectionId).addArgument(accum).log(); handleEndOfResponse(accum, RequestResponsePacketPair.ReconstructionStatus.COMPLETE); return true; } @@ -526,13 +530,10 @@ private void fireAccumulationsCallbacksAndClose( // it's a difficult decision and one to be managed with a policy. // TODO - add Jira/github issue here. log.atWarn() - .setMessage( - "Terminating a TrafficStream reconstruction before data was accumulated " - + "for " - + accumulation.trafficChannelKey - + " assuming an empty server interaction and NOT " - + "reproducing this to the target cluster." - ) + .setMessage("Terminating a TrafficStream reconstruction before data was accumulated " + + "for {} assuming an empty server interaction and NOT " + + "reproducing this to the target cluster.") + .addArgument(accumulation.trafficChannelKey) .log(); if (accumulation.hasRrPair()) { listener.onTrafficStreamsExpired( diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java index 433ff14a6..6be5780f3 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ClientConnectionPool.java @@ -87,30 +87,26 @@ public ConnectionReplaySession buildConnectionReplaySession(IReplayContexts.ICha () -> buildConnectionReplaySession(channelKeyCtx) ); log.atTrace() - .setMessage( - () -> "returning ReplaySession=" - + crs - + " for " - + channelKeyCtx.getConnectionId() - + " from " - + channelKeyCtx - ) + .setMessage("returning ReplaySession={} for {} from {}") + .addArgument(crs) + .addArgument(channelKeyCtx::getConnectionId) + .addArgument(channelKeyCtx) .log(); return crs; } public void closeConnection(IReplayContexts.IChannelKeyContext ctx, int sessionNumber) { var connId = ctx.getConnectionId(); - log.atTrace().setMessage(() -> "closing connection for " + connId).log(); + log.atTrace().setMessage("closing connection for {}").addArgument(connId).log(); var connectionReplaySession = connectionId2ChannelCache.getIfPresent(getKey(connId, sessionNumber)); if (connectionReplaySession != null) { closeClientConnectionChannel(connectionReplaySession); connectionId2ChannelCache.invalidate(connId); } else { log.atTrace() - .setMessage( - () -> "No ChannelFuture for " + ctx + " in closeConnection. " + + .setMessage("No ChannelFuture for {} in closeConnection. " + "The connection may have already been closed") + .addArgument(ctx) .log(); } } @@ -127,24 +123,30 @@ private TrackedFuture closeClientConnectionChannel(ConnectionRe .getChannelFutureInAnyState() // this could throw, especially if the even loop has begun to shut down .thenCompose(channelFuture -> { if (channelFuture == null) { - log.atTrace().setMessage(() -> - "Couldn't find the channel for " + session.getChannelKeyContext() + " to close it. " + - "It may have already been reset.").log(); + log.atTrace().setMessage("Couldn't find the channel for {} to close it. 
" + + "It may have already been reset.") + .addArgument(session::getChannelKeyContext) + .log(); return TextTrackedFuture.completedFuture(null, () -> ""); } - log.atTrace().setMessage(() -> - "closing channel " + channelFuture.channel() + "(" + session.getChannelKeyContext() + ")...").log(); + log.atTrace().setMessage("closing channel {} ({})...") + .addArgument(channelFuture::channel) + .addArgument(session::getChannelKeyContext) + .log(); return NettyFutureBinders.bindNettyFutureToTrackableFuture( channelFuture.channel().close(), "calling channel.close()") .thenApply(v -> { - log.atTrace().setMessage(() -> - "channel.close() has finished for " + session.getChannelKeyContext() + " with value=" + v).log(); + log.atTrace().setMessage("channel.close() has finished for {} with value={}") + .addArgument(session::getChannelKeyContext) + .addArgument(v) + .log(); if (session.hasWorkRemaining()) { - log.atWarn().setMessage(() -> - "Work items are still remaining for this connection session" + - "(last associated with connection=" + session.getChannelKeyContext() + "). " - + session.calculateSizeSlowly() + " requests that were enqueued won't be run").log(); + log.atWarn().setMessage("Work items are still remaining for this connection session " + + "(last associated with connection={}). {} requests that were enqueued won't be run") + .addArgument(session::getChannelKeyContext) + .addArgument(session::calculateSizeSlowly) + .log(); } session.schedule.clear(); return channelFuture.channel(); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java index cd88a1c86..4a0f87609 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ParsedHttpMessagesAsDicts.java @@ -153,13 +153,10 @@ private static Map makeSafeMap( } catch (Exception e) { // TODO - this isn't a good design choice. 
// We should follow through with the spirit of this class and leave this as empty optional values - log.atWarn() - .setMessage( - () -> "Putting what may be a bogus value in the output because transforming it " - + "into json threw an exception for " - + context - ) - .setCause(e) + log.atWarn().setCause(e) + .setMessage("Putting what may be a bogus value in the output because transforming it " + + "into json threw an exception for {}") + .addArgument(context) .log(); return Map.of(EXCEPTION_KEY_STRING, (Object) e.toString()); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java index d95b29854..8517617d4 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ReplayEngine.java @@ -121,45 +121,30 @@ private TrackedFuture hookWorkFinishingUpdates( f -> f.whenComplete((v, t) -> Utils.setIfLater(lastCompletedSourceTimeEpochMs, timestamp.toEpochMilli())) .whenComplete((v, t) -> { var newCount = totalCountOfScheduledTasksOutstanding.decrementAndGet(); - log.atInfo() - .setMessage( - () -> "Scheduled task '" - + taskDescription - + "' finished (" - + stringableKey - + ") decremented tasksOutstanding to " - + newCount - ) + log.atInfo().setMessage("Scheduled task '{}' finished ({}) decremented tasksOutstanding to {}") + .addArgument(taskDescription) + .addArgument(stringableKey) + .addArgument(newCount) .log(); }) .whenComplete((v, t) -> contentTimeController.stopReadsPast(timestamp)) - .whenComplete( - (v, t) -> log.atDebug() - .setMessage( - () -> "work finished and used timestamp=" - + timestamp - + " to update contentTimeController (tasksOutstanding=" - + totalCountOfScheduledTasksOutstanding.get() - + ")" - ) - .log() + .whenComplete((v, t) -> log.atDebug() + .setMessage("work finished and used timestamp={} " + + "to update contentTimeController (tasksOutstanding={})") + .addArgument(timestamp) + .addArgument(totalCountOfScheduledTasksOutstanding::get) + .log() ), () -> "Updating fields for callers to poll progress and updating backpressure" ); } private static void logStartOfWork(Object stringableKey, long newCount, Instant start, String label) { - log.atInfo() - .setMessage( - () -> "Scheduling '" - + label - + "' (" - + stringableKey - + ") to run at " - + start - + " incremented tasksOutstanding to " - + newCount - ) + log.atInfo().setMessage("Scheduling '{}' ({}) to run at {} incremented tasksOutstanding to {}") + .addArgument(label) + .addArgument(stringableKey) + .addArgument(start) + .addArgument(newCount) .log(); } @@ -196,9 +181,13 @@ public TrackedFuture scheduleRequest( var requestKey = ctx.getReplayerRequestKey(); logStartOfWork(requestKey, newCount, start, label); - log.atDebug().setMessage( - () -> "Scheduling request for " + ctx + " to run from [" + start + ", " + end + " with an interval of " - + interval + " for " + numPackets + " packets").log(); + log.atDebug().setMessage("Scheduling request for {} to run from [{}, {}] with an interval of {} for {} packets") + .addArgument(ctx) + .addArgument(start) + .addArgument(end) + .addArgument(interval) + .addArgument(numPackets) + .log(); var result = networkSendOrchestrator.scheduleRequest(requestKey, ctx, start, interval, packets, retryVisitor); return hookWorkFinishingUpdates(result, originalStart, requestKey, label); } diff --git 
a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java index 1b60c4ec6..f501a523a 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestSenderOrchestrator.java @@ -102,7 +102,10 @@ public TrackedFuture scheduleWork( ctx.getChannelKeyContext(), ctx.getReplayerRequestKey().sourceRequestIndexSessionIdentifier ); - log.atDebug().setMessage(() -> "Scheduling work for " + ctx.getConnectionId() + " at time " + timestamp).log(); + log.atDebug().setMessage("Scheduling work for {} at time {}") + .addArgument(ctx::getConnectionId) + .addArgument(timestamp) + .log(); var scheduledContext = ctx.createScheduledContext(timestamp); // This method doesn't use the scheduling that scheduleRequest and scheduleClose use because // doing work associated with a connection is considered to be preprocessing work independent @@ -187,7 +190,10 @@ public TrackedFuture scheduleClose( ) { var channelKey = ctx.getChannelKey(); var channelInteraction = new IndexedChannelInteraction(channelKey, channelInteractionNum); - log.atDebug().setMessage(() -> "Scheduling CLOSE for " + channelInteraction + " at time " + timestamp).log(); + log.atDebug().setMessage("Scheduling CLOSE for {} at time {}") + .addArgument(channelInteraction) + .addArgument(timestamp) + .log(); return submitUnorderedWorkToEventLoop( ctx, sessionNumber, @@ -245,16 +251,10 @@ private TrackedFuture submitUnorderedWorkToEventLoop( final var replaySession = clientConnectionPool.getCachedSession(ctx, sessionNumber); return NettyFutureBinders.bindNettySubmitToTrackableFuture(replaySession.eventLoop) .getDeferredFutureThroughHandle((v, t) -> { - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "adding work item at slot " - + channelInteractionNumber - + " for " - + replaySession.getChannelKeyContext() - + " with " - + replaySession.scheduleSequencer - ) + log.atTrace().setMessage("adding work item at slot {} for {} with {}") + .addArgument(channelInteractionNumber) + .addArgument(replaySession::getChannelKeyContext) + .addArgument(replaySession.scheduleSequencer) .log(); return replaySession.scheduleSequencer.addFutureForWork( channelInteractionNumber, @@ -323,7 +323,11 @@ private TrackedFuture scheduleOnConnectionReplaySession( Instant atTime, ChannelTask task ) { - log.atInfo().setMessage(() -> channelInteraction + " scheduling " + task.kind + " at " + atTime).log(); + log.atInfo().setMessage("{} scheduling {} at {}") + .addArgument(channelInteraction) + .addArgument(task.kind) + .addArgument(atTime) + .log(); var schedule = channelFutureAndRequestSchedule.schedule; var eventLoop = channelFutureAndRequestSchedule.eventLoop; @@ -333,8 +337,10 @@ private TrackedFuture scheduleOnConnectionReplaySession( : "Per-connection TrafficStream ordering should force a time ordering on incoming requests"; var workPointTrigger = schedule.appendTaskTrigger(atTime, task.kind).scheduleFuture; var workFuture = task.getRunnable().apply(workPointTrigger); - log.atTrace() - .setMessage(() -> channelInteraction + " added a scheduled event at " + atTime + "... " + schedule) + log.atTrace().setMessage("{} added a scheduled event at {}... 
{}") + .addArgument(channelInteraction) + .addArgument(atTime) + .addArgument(schedule) .log(); if (wasEmpty) { bindNettyScheduleToCompletableFuture(eventLoop, atTime, workPointTrigger.future); @@ -344,14 +350,9 @@ private TrackedFuture scheduleOnConnectionReplaySession( var itemStartTimeOfPopped = schedule.removeFirstItem(); assert atTime.equals(itemStartTimeOfPopped) : "Expected to have popped the item to match the start time for the responseFuture that finished"; - log.atDebug() - .setMessage("{}") - .addArgument( - () -> channelInteraction.toString() - + " responseFuture completed - checking " - + schedule - + " for the next item to schedule" - ) + log.atDebug().setMessage("{} responseFuture completed - checking {} for the next item to schedule") + .addArgument(channelInteraction::toString) + .addArgument(schedule) .log(); Optional.ofNullable(schedule.peekFirstItem()) .ifPresent(kvp -> bindNettyScheduleToCompletableFuture(eventLoop, kvp.startTime, kvp.scheduleFuture)); @@ -399,7 +400,7 @@ private Duration doubleRetryDelayCapped(Duration d) { } if (dtr.directive == RetryDirective.RETRY) { var newStartTime = referenceStartTime.plus(nextRetryDelay); - log.atInfo().setMessage(() -> "Making request scheduled at " + newStartTime).log(); + log.atInfo().setMessage("Making request scheduled at {}").addArgument(newStartTime).log(); var schedulingDelay = Duration.between(now(), newStartTime); return NettyFutureBinders.bindNettyScheduleToCompletableFuture( eventLoop, schedulingDelay) @@ -423,7 +424,7 @@ private TrackedFuture sendPackets( AtomicInteger requestPacketCounter ) { final var oldCounter = requestPacketCounter.getAndIncrement(); - log.atTrace().setMessage(() -> "sendNextPartAndContinue: packetCounter=" + oldCounter).log(); + log.atTrace().setMessage("sendNextPartAndContinue: packetCounter={}").addArgument(oldCounter).log(); assert iterator.hasNext() : "Should not have called this with no items to send"; var consumeFuture = packetReceiver.consumeBytes(iterator.next().retainedDuplicate()); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java index 04bf8cc60..c375d994c 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/RequestTransformerAndSender.java @@ -102,7 +102,10 @@ public TrackedFuture transformAndSendRequest( start, () -> transformAllData(inputRequestTransformerFactory.create(ctx), packetsSupplier) ); - log.atDebug().setMessage(() -> "request transform future for " + ctx + " = " + requestReadyFuture).log(); + log.atDebug().setMessage("request transform future for {} = {}") + .addArgument(ctx) + .addArgument(requestReadyFuture) + .log(); // It might be safer to chain this work directly inside the scheduleWork call above so that the // read buffer horizons aren't set after the transformation work finishes, but after the packets // are fully handled @@ -133,26 +136,27 @@ private static TrackedFuture transformAllData( var packets = packetSupplier.get().map(Unpooled::wrappedBuffer); packets.forEach(packetData -> { log.atDebug() - .setMessage( - () -> logLabel + " sending " + packetData.readableBytes() + " bytes to the packetHandler" - ) + .setMessage("{} sending {} bytes to the packetHandler") + .addArgument(logLabel) + 
.addArgument(packetData::readableBytes) .log(); var consumeFuture = packetHandler.consumeBytes(packetData); - log.atDebug().setMessage(() -> logLabel + " consumeFuture = " + consumeFuture).log(); + log.atDebug().setMessage("{} consumeFuture = {}") + .addArgument(logLabel) + .addArgument(consumeFuture) + .log(); }); - log.atDebug().setMessage(() -> logLabel + " done sending bytes, now finalizing the request").log(); + log.atDebug().setMessage("{} done sending bytes, now finalizing the request").addArgument(logLabel).log(); return packetHandler.finalizeRequest(); } catch (Exception e) { log.atInfo() .setCause(e) .setMessage( "Encountered an exception while transforming the http request. " - + "The base64 gzipped traffic stream, for later diagnostic purposes, is: " - + Utils.packetsToCompressedTrafficStream(packetSupplier.get()) - ) + + "The base64 gzipped traffic stream, for later diagnostic purposes, is: {}") + .addArgument(() -> Utils.packetsToCompressedTrafficStream(packetSupplier.get())) .log(); throw e; } } - } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java index ca9d38815..4ef0c825a 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/ResultsToLogsConsumer.java @@ -137,19 +137,17 @@ private Map toJSONObject(SourceTargetCaptureTuple tuple, ParsedH */ public void accept(SourceTargetCaptureTuple tuple, ParsedHttpMessagesAsDicts parsedMessages) { final var index = tupleCounter.getAndIncrement(); - progressLogger.atInfo() - .setMessage("{}") - .addArgument(() -> toTransactionSummaryString(index, tuple, parsedMessages)) - .log(); + progressLogger.atInfo().setMessage("{}") + .addArgument(() -> toTransactionSummaryString(index, tuple, parsedMessages)).log(); if (tupleLogger.isInfoEnabled()) { try { var originalTuple = toJSONObject(tuple, parsedMessages); var transformedTuple = tupleTransformer.transformJson(originalTuple); var tupleString = PLAIN_MAPPER.writeValueAsString(transformedTuple); - tupleLogger.atInfo().setMessage("{}").addArgument(() -> tupleString).log(); + tupleLogger.atInfo().setMessage("{}").addArgument(tupleString).log(); } catch (Exception e) { - log.atError().setMessage("Exception converting tuple to string").setCause(e).log(); - tupleLogger.atInfo().setMessage("{}").addArgument("{ \"error\":\"" + e.getMessage() + "\" }").log(); + log.atError().setCause(e).setMessage("Exception converting tuple to string").log(); + tupleLogger.atInfo().setMessage("{ \"error\":\"{}\" }").addArgument(e::getMessage).log(); throw Lombok.sneakyThrow(e); } } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java index 275d505ff..4a167257a 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java @@ -337,7 +337,7 @@ public static void main(String[] args) throws Exception { final var msg = "Exception parsing " + params.targetUriString; System.err.println(msg); System.err.println(e.getMessage()); - log.atError().setMessage(msg).setCause(e).log(); + 
log.atError().setCause(e).setMessage("{}").addArgument(msg).log(); System.exit(3); return; } @@ -420,10 +420,9 @@ public static void main(String[] args) throws Exception { ); ActiveContextMonitor finalActiveContextMonitor = activeContextMonitor; scheduledExecutorService.scheduleAtFixedRate(() -> { - activeContextLogger.atInfo() - .setMessage( - () -> "Total requests outstanding at " + Instant.now() + ": " + tr.requestWorkTracker.size() - ) + activeContextLogger.atInfo().setMessage("Total requests outstanding at {}: {}") + .addArgument(Instant::now) + .addArgument(tr.requestWorkTracker::size) .log(); finalActiveContextMonitor.run(); }, ACTIVE_WORK_MONITOR_CADENCE_MS, ACTIVE_WORK_MONITOR_CADENCE_MS, TimeUnit.MILLISECONDS); @@ -445,9 +444,9 @@ public static void main(String[] args) throws Exception { var acmLevel = globalContextTracker.getActiveScopesByAge().findAny().isPresent() ? Level.ERROR : Level.INFO; - activeContextLogger.atLevel(acmLevel).setMessage(() -> "Outstanding work after shutdown...").log(); + activeContextLogger.atLevel(acmLevel).setMessage("Outstanding work after shutdown...").log(); activeContextMonitor.run(); - activeContextLogger.atLevel(acmLevel).setMessage(() -> "[end of run]]").log(); + activeContextLogger.atLevel(acmLevel).setMessage("[end of run]]").log(); } } } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java index 4acf7d6ae..ef9761d79 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerCore.java @@ -111,7 +111,7 @@ public Consumer onRequestReceived( ); finishedAccumulatingResponseFuture.future.whenComplete( (v, t) -> log.atDebug() - .setMessage(() -> "Done receiving captured stream for {}:{}") + .setMessage("Done receiving captured stream for {}:{}") .addArgument(ctx) .addArgument(v.requestData) .log() @@ -160,9 +160,9 @@ private TrackedFuture sendReque ); httpSentRequestFuture.future.whenComplete( (v, t) -> log.atTrace() - .setMessage(() -> "Summary response value for " + requestKey + " returned=" + v) - .log() - ); + .setMessage("Summary response value for {} returned={}") + .addArgument(requestKey).addArgument(v) + .log()); return httpSentRequestFuture; } @@ -189,26 +189,19 @@ Void handleCompletedTransaction( commitTrafficStreams(rrPair.completionStatus, rrPair.trafficStreamKeysBeingHeld); return null; } else { - log.atError() - .setCause(t) - .setMessage(() -> "Throwable passed to handle() for " + context + ". Rethrowing.") - .log(); + log.atError().setCause(t) + .setMessage("Throwable passed to handle() for {}. Rethrowing.").addArgument(context).log(); throw Lombok.sneakyThrow(t); } } catch (Error error) { - log.atError() - .setCause(error) - .setMessage(() -> "Caught error and initiating TrafficReplayer shutdown") - .log(); + log.atError().setCause(error) + .setMessage("Caught error and initiating TrafficReplayer shutdown").log(); shutdown(error); throw error; } catch (Exception e) { - log.atError() - .setMessage( - "Unexpected exception while sending the " - + "aggregated response and context for {} to the callback. " - + "Proceeding, but the tuple receiver context may be compromised." - ) + log.atError().setMessage("Unexpected exception while sending the " + + "aggregated response and context for {} to the callback. 
" + + "Proceeding, but the tuple receiver context may be compromised.") .addArgument(context) .setCause(e) .log(); @@ -286,9 +279,7 @@ private void packageAndWriteResponse( } try (var requestResponseTuple = new SourceTargetCaptureTuple(tupleHandlingContext, rrPair, summary, t)) { log.atDebug() - .setMessage("{}") - .addArgument(() -> "Source/Target Request/Response tuple: " + requestResponseTuple) - .log(); + .setMessage("Source/Target Request/Response tuple: {}").addArgument(requestResponseTuple).log(); tupleWriter.accept(requestResponseTuple); } @@ -322,11 +313,8 @@ protected void perResponseConsumer(AggregatedRawResponse summary, HttpRequestTransformationStatus transformationStatus, IReplayContexts.IReplayerHttpTransactionContext context) { if (summary != null && summary.getError() != null) { - log.atInfo() - .setCause(summary.getError()) - .setMessage("Exception for {}: ") - .addArgument(context) - .log(); + log.atInfo().setCause(summary.getError()) + .setMessage("Exception for {}: ").addArgument(context).log(); exceptionRequestCount.incrementAndGet(); } else if (transformationStatus.isError()) { log.atInfo() @@ -336,8 +324,7 @@ protected void perResponseConsumer(AggregatedRawResponse summary, .log(); exceptionRequestCount.incrementAndGet(); } else if (summary == null) { - log.atInfo() - .setMessage("Not counting this response. No result at all for {}: ") + log.atInfo().setMessage("Not counting this response. No result at all for {}: ") .addArgument(context) .log(); } else { @@ -363,10 +350,8 @@ public void pullCaptureFromSourceToAccumulator( trafficStreams = this.nextChunkFutureRef.get().get(); } catch (ExecutionException ex) { if (ex.getCause() instanceof EOFException) { - log.atWarn() - .setCause(ex.getCause()) - .setMessage("Got an EOF on the stream. " + "Done reading traffic streams.") - .log(); + log.atWarn().setCause(ex.getCause()) + .setMessage("Got an EOF on the stream. 
" + "Done reading traffic streams.").log(); break; } else { log.atWarn().setCause(ex).setMessage("Done reading traffic streams due to exception.").log(); @@ -380,9 +365,7 @@ public void pullCaptureFromSourceToAccumulator( .collect(Collectors.joining(";")) ) .filter(s -> !s.isEmpty()) - .ifPresent( - s -> log.atInfo().setMessage("{}").addArgument("TrafficStream Summary: {" + s + "}").log() - ); + .ifPresent(s -> log.atInfo().setMessage("TrafficStream Summary: {{}}").addArgument(s).log()); } trafficStreams.forEach(trafficToHttpTransactionAccumulator::accept); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java index 7b0c32037..56770800a 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayerTopLevel.java @@ -203,20 +203,18 @@ protected void wrapUpWorkAndEmitSummary( waitForRemainingWork(logLevel, timeout); break; } catch (TimeoutException e) { - log.atLevel(logLevel).log("Timed out while waiting for the remaining " + "requests to be finalized..."); + log.atLevel(logLevel).log("Timed out while waiting for the remaining requests to be finalized..."); logLevel = secondaryLogLevel; } } if (!requestWorkTracker.isEmpty() || exceptionRequestCount.get() > 0) { log.atWarn() - .setMessage( - "{} in-flight requests being dropped due to pending shutdown; " - + "{} requests to the target threw an exception; " - + "{} requests were successfully processed." - ) - .addArgument(requestWorkTracker.size()) - .addArgument(exceptionRequestCount.get()) - .addArgument(successfulRequestCount.get()) + .setMessage("{} in-flight requests being dropped due to pending shutdown; " + + "{} requests to the target threw an exception; " + + "{} requests were successfully processed.") + .addArgument(requestWorkTracker::size) + .addArgument(exceptionRequestCount::get) + .addArgument(successfulRequestCount::get) .log(); } else { log.info(successfulRequestCount.get() + " requests were successfully processed."); @@ -325,10 +323,8 @@ private void handleAlreadySetFinishedSignal() throws InterruptedException, Execu throw e; } } catch (Error t) { - log.atError() - .setCause(t) - .setMessage(() -> "Not waiting for all work to finish. " + "The TrafficReplayer is shutting down") - .log(); + log.atError().setCause(t) + .setMessage("Not waiting for all work to finish. The TrafficReplayer is shutting down").log(); throw t; } } @@ -339,21 +335,21 @@ protected static void writeStatusLogsForRemainingWork( UniqueReplayerRequestKey, TrackedFuture>[] allRemainingWorkArray ) { - log.atLevel(logLevel).log("All remaining work to wait on " + allRemainingWorkArray.length); + log.atLevel(logLevel).setMessage("All remaining work to wait on {}") + .addArgument(allRemainingWorkArray.length).log(); if (log.isInfoEnabled()) { LoggingEventBuilder loggingEventBuilderToUse = log.isTraceEnabled() ? log.atTrace() : log.atInfo(); long itemLimit = log.isTraceEnabled() ? 
Long.MAX_VALUE : MAX_ITEMS_TO_SHOW_FOR_LEFTOVER_WORK_AT_INFO_LEVEL; - loggingEventBuilderToUse.setMessage( - () -> " items: " - + Arrays.stream(allRemainingWorkArray) - .map( - kvp -> kvp.getKey() - + " --> " - + kvp.getValue().formatAsString(TrafficReplayerTopLevel::formatWorkItem) - ) - .limit(itemLimit) - .collect(Collectors.joining("\n")) - ).log(); + loggingEventBuilderToUse.setMessage(" items: {}") + .addArgument(() -> Arrays.stream(allRemainingWorkArray) + .map( + kvp -> kvp.getKey() + + " --> " + + kvp.getValue().formatAsString(TrafficReplayerTopLevel::formatWorkItem) + ) + .limit(itemLimit) + .collect(Collectors.joining("\n"))) + .log(); } } @@ -380,14 +376,11 @@ protected boolean shouldRetry() { @SneakyThrows @Override public @NonNull CompletableFuture shutdown(Error error) { - log.atWarn().setCause(error).setMessage(() -> "Shutting down " + this).log(); + log.atWarn().setCause(error).setMessage("Shutting down {}").addArgument(this).log(); shutdownReasonRef.compareAndSet(null, error); if (!shutdownFutureRef.compareAndSet(null, new CompletableFuture<>())) { - log.atError() - .setMessage( - () -> "Shutdown was already signaled by {}. " + "Ignoring this shutdown request due to {}." - ) - .addArgument(shutdownReasonRef.get()) + log.atError().setMessage("Shutdown was already signaled by {}. Ignoring this shutdown request due to {}.") + .addArgument(shutdownReasonRef::get) .addArgument(error) .log(); return shutdownFutureRef.get(); @@ -416,7 +409,7 @@ protected boolean shouldRetry() { } } var shutdownFuture = shutdownFutureRef.get(); - log.atWarn().setMessage(() -> "Shutdown setup has been initiated").log(); + log.atWarn().setMessage("Shutdown setup has been initiated").log(); return shutdownFuture; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java index e875fd78d..3c57dbb6b 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/JsonAccumulator.java @@ -61,7 +61,7 @@ public Object consumeByteBufferForSingleObject(ByteBuffer byteBuffer) throws IOE } public void consumeByteBuffer(ByteBuffer byteBuffer) throws IOException { - log.atTrace().setMessage(() -> "Consuming bytes: {}").addArgument(() -> byteBuffer.toString()).log(); + log.atTrace().setMessage("Consuming bytes: {}").addArgument(byteBuffer::toString).log(); feeder.feedInput(byteBuffer); } @@ -73,7 +73,7 @@ public Object getNextTopLevelObject() throws IOException { break; } - log.atTrace().setMessage(() -> "{} ... adding token={}").addArgument(this).addArgument(token).log(); + log.atTrace().setMessage("{} ... 
adding token={}").addArgument(this).addArgument(token).log(); switch (token) { case FIELD_NAME: jsonObjectStack.push(parser.getText()); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java index 57fa4a4a3..3dfc2054c 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumer.java @@ -91,9 +91,8 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { socketContext.addTraceException(cause, true); - log.atDebug() - .setMessage("Exception caught in ConnectionClosedListenerHandler." + "Closing channel due to exception") - .setCause(cause) + log.atDebug().setCause(cause) + .setMessage("Exception caught in ConnectionClosedListenerHandler. Closing channel due to exception") .log(); ctx.close(); super.exceptionCaught(ctx, cause); @@ -109,7 +108,7 @@ public NettyPacketToHttpConsumer( var parentContext = ctx.createTargetRequestContext(); this.setCurrentMessageContext(parentContext.createHttpSendingContext()); responseBuilder = AggregatedRawResponse.builder(Instant.now()); - log.atDebug().setMessage(() -> "C'tor: incoming session=" + replaySession).log(); + log.atDebug().setMessage("C'tor: incoming session={}").addArgument(replaySession).log(); this.activeChannelFuture = activateLiveChannel(); this.readTimeoutDuration = readTimeoutDuration; } @@ -125,7 +124,7 @@ private TrackedFuture activateLiveChannel() { if (t != null) { channelCtx.addFailedChannelCreation(); channelCtx.addTraceException(channelFuture.cause(), true); - log.atWarn().setMessage(() -> "error creating channel, not retrying").setCause(t).log(); + log.atWarn().setCause(t).setMessage("error creating channel, not retrying").log(); throw Lombok.sneakyThrow(t); } @@ -133,15 +132,15 @@ private TrackedFuture activateLiveChannel() { if (c.isActive()) { this.channel = c; initializeRequestHandlers(); - log.atDebug().setMessage(() -> "Channel initialized for " + channelCtx + " signaling future").log(); + log.atDebug().setMessage("Channel initialized for {} signaling future") + .addArgument(channelCtx).log(); return TextTrackedFuture.completedFuture(null, () -> "Done"); } else { // this may recurse forever - until the event loop is shutdown // (see the ClientConnectionPool::shutdownNow()) channelCtx.addFailedChannelCreation(); log.atWarn() - .setMessage(() -> "Channel wasn't active, trying to create another for this request") - .log(); + .setMessage("Channel wasn't active, trying to create another for this request").log(); return activateLiveChannel(); } }, () -> "acting on ready channelFuture to retry if inactive or to return"), @@ -192,7 +191,10 @@ public static TrackedFuture createClientConnection( } String host = serverUri.getHost(); int port = serverUri.getPort(); - log.atTrace().setMessage(() -> "Active - setting up backend connection to " + host + ":" + port).log(); + log.atTrace().setMessage("Active - setting up backend connection to {}:{}") + .addArgument(host) + .addArgument(port) + .log(); Bootstrap b = new Bootstrap(); var channelKeyCtx = requestCtx.getLogicalEnclosingScope().getChannelKeyContext(); @@ -211,10 +213,8 @@ 
protected void initChannel(@NonNull Channel ch) throws Exception { try { var t = TrackedFuture.unwindPossibleCompletionException(tWrapped); if (t != null) { - log.atWarn() - .setMessage(() -> "Caught exception while trying to get an active channel") - .setCause(t) - .log(); + log.atWarn().setCause(t) + .setMessage("Caught exception while trying to get an active channel").log(); } else if (!outboundChannelFuture.channel().isActive()) { t = new ChannelNotActiveException(); } @@ -243,12 +243,9 @@ protected void initChannel(@NonNull Channel ch) throws Exception { ChannelFuture outboundChannelFuture) { final var channel = outboundChannelFuture.channel(); - log.atTrace() - .setMessage( - () -> channelKeyContext.getChannelKey() - + " Done setting up client channel & it was successful for " - + channel - ) + log.atTrace().setMessage("{} successfully done setting up client channel for {}") + .addArgument(channelKeyContext::getChannelKey) + .addArgument(channel) .log(); var pipeline = channel.pipeline(); if (sslContext != null) { @@ -324,7 +321,7 @@ private void initializeRequestHandlers() { addLoggingHandlerLast(pipeline, "D"); pipeline.addLast(BACKSIDE_HTTP_WATCHER_HANDLER_NAME, new BacksideHttpWatcherHandler(responseBuilder)); addLoggingHandlerLast(pipeline, "E"); - log.atTrace().setMessage(() -> "Added handlers to the pipeline: " + pipeline).log(); + log.atTrace().setMessage("Added handlers to the pipeline: {}").addArgument(pipeline).log(); channel.config().setAutoRead(true); } @@ -336,8 +333,7 @@ private static void addLoggingHandlerLast(ChannelPipeline pipeline, String name) private void deactivateChannel() { try { var pipeline = channel.pipeline(); - log.atDebug() - .setMessage(() -> "Resetting the pipeline for channel {} currently at: {}") + log.atDebug().setMessage("Resetting the pipeline for channel {} currently at: {}") .addArgument(channel) .addArgument(pipeline) .log(); @@ -345,9 +341,8 @@ private void deactivateChannel() { try { pipeline.remove(handlerName); } catch (NoSuchElementException e) { - log.atWarn() - .setMessage(() -> "Ignoring an exception that the " + handlerName + " wasn't present") - .log(); + log.atWarn().setMessage("Ignoring an exception that the {} wasn't present") + .addArgument(handlerName).log(); } } while (true) { @@ -358,7 +353,10 @@ private void deactivateChannel() { pipeline.removeLast(); } channel.config().setAutoRead(false); - log.atDebug().setMessage(() -> "Reset the pipeline for channel " + channel + " back to: " + pipeline).log(); + log.atDebug().setMessage("Reset the pipeline for channel {} back to: {}") + .addArgument(channel) + .addArgument(pipeline) + .log(); } finally { getCurrentRequestSpan().close(); getParentContext().close(); @@ -369,26 +367,21 @@ private void deactivateChannel() { public TrackedFuture consumeBytes(ByteBuf packetData) { activeChannelFuture = activeChannelFuture.getDeferredFutureThroughHandle((v, channelException) -> { if (channelException == null) { - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "outboundChannelFuture is ready. Writing packets (hash=" - + System.identityHashCode(packetData) - + "): " - + httpContext() - + ": " - + packetData.toString(StandardCharsets.UTF_8) - ) + log.atTrace().setMessage("outboundChannelFuture is ready. 
Writing packets (hash={}): {}: {}") + .addArgument(() -> System.identityHashCode(packetData)) + .addArgument(this::httpContext) + .addArgument(() -> packetData.toString(StandardCharsets.UTF_8)) .log(); return writePacketAndUpdateFuture(packetData).whenComplete((v2, t2) -> - log.atTrace().setMessage(() -> "finished writing " + httpContext() + " t=" + t2).log(), () -> ""); + log.atTrace().setMessage("finished writing {} t={}") + .addArgument(this::httpContext) + .addArgument(t2) + .log(), () -> ""); } else { log.atWarn() - .setMessage( - () -> httpContext().getReplayerRequestKey() - + "outbound channel was not set up successfully, NOT writing bytes hash=" - + System.identityHashCode(packetData) - ) + .setMessage("{} outbound channel was not set up successfully, NOT writing bytes hash={}") + .addArgument(() -> httpContext().getReplayerRequestKey()) + .addArgument(() -> System.identityHashCode(packetData)) .log(); if (channel != null) { channel.close(); @@ -397,14 +390,10 @@ public TrackedFuture consumeBytes(ByteBuf packetData) { } }, () -> "consumeBytes - after channel is fully initialized (potentially waiting on TLS handshake)"); log.atTrace() - .setMessage( - () -> "Setting up write of packetData[" - + packetData - + "] hash=" - + System.identityHashCode(packetData) - + ". Created future consumeBytes=" - + activeChannelFuture - ) + .setMessage("Setting up write of packetData[{}] hash={}. Created future consumeBytes={}") + .addArgument(packetData) + .addArgument(() -> System.identityHashCode(packetData)) + .addArgument(activeChannelFuture) .log(); return activeChannelFuture; } @@ -423,9 +412,8 @@ private TrackedFuture writePacketAndUpdateFuture(ByteBuf packetDat @Override public TrackedFuture finalizeRequest() { var ff = activeChannelFuture.getDeferredFutureThroughHandle((v, t) -> { - log.atDebug() - .setMessage(() -> "finalization running since all prior work has completed for " + httpContext()) - .log(); + log.atDebug().setMessage("finalization running since all prior work has completed for {}") + .addArgument(() -> httpContext()).log(); if (!(this.currentRequestContextUnion instanceof IReplayContexts.IReceivingHttpResponseContext)) { this.getCurrentRequestSpan().close(); this.setCurrentMessageContext(getParentContext().createWaitingForResponseContext()); @@ -445,7 +433,7 @@ public TrackedFuture finalizeRequest() { .map(f -> f.whenComplete((v, t) -> { try { if (channel == null) { - log.atTrace().setMessage(() -> + log.atTrace().setMessage( "finalizeRequest().whenComplete has no channel present that needs to be to deactivated.").log(); } else { deactivateChannel(); @@ -455,15 +443,10 @@ public TrackedFuture finalizeRequest() { getParentContext().close(); } }), () -> "clearing pipeline"); - log.atDebug() - .setMessage( - () -> "Chaining finalization work off of " - + activeChannelFuture - + " for " - + httpContext() - + ". Returning finalization future=" - + ff - ) + log.atDebug().setMessage("Chaining finalization work off of {} for {}. 
Returning finalization future={}") + .addArgument(activeChannelFuture) + .addArgument(() -> httpContext()) + .addArgument(ff) .log(); return ff; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java index 81424e89a..c83f466ab 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/HttpJsonTransformingConsumer.java @@ -98,14 +98,9 @@ private HttpRequestDecoder getHttpRequestDecoderHandler() { public TrackedFuture consumeBytes(ByteBuf nextRequestPacket) { chunks.add(nextRequestPacket.retainedDuplicate()); chunkSizes.get(chunkSizes.size() - 1).add(nextRequestPacket.readableBytes()); - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "HttpJsonTransformingConsumer[" - + this - + "]: writing into embedded channel: " - + nextRequestPacket.toString(StandardCharsets.UTF_8) - ) + log.atTrace().setMessage("HttpJsonTransformingConsumer[{}]: writing into embedded channel: {}") + .addArgument(this) + .addArgument(() -> nextRequestPacket.toString(StandardCharsets.UTF_8)) .log(); return TextTrackedFuture.completedFuture(null, () -> "initialValue") .map( diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java index e1c4193dc..42e20345d 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpRequestConvertHandler.java @@ -30,16 +30,11 @@ public void channelRead(@NonNull ChannelHandlerContext ctx, @NonNull Object msg) if (msg instanceof HttpRequest) { httpTransactionContext.onHeaderParse(); var request = (HttpRequest) msg; - log.atDebug() - .setMessage( - () -> diagnosticLabel - + " parsed request: " - + request.method() - + " " - + request.uri() - + " " - + request.protocolVersion().text() - ) + log.atDebug().setMessage("{} parsed request: {} {} {}") + .addArgument(diagnosticLabel) + .addArgument(request::method) + .addArgument(request::uri) + .addArgument(() -> request.protocolVersion().text()) .log(); var httpJsonMessage = parseHeadersIntoMessage(request); ctx.fireChannelRead(httpJsonMessage); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java index b1ab1aded..59846f698 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyDecodedHttpResponseConvertHandler.java @@ -30,16 +30,11 @@ public void channelRead(@NonNull ChannelHandlerContext ctx, @NonNull Object msg) if (msg instanceof HttpResponse) { 
httpTransactionContext.onHeaderParse(); var response = (HttpResponse) msg; - log.atInfo() - .setMessage( - () -> diagnosticLabel - + " parsed response: " - + response.status().code() - + " " - + response.status().reasonPhrase() - + " " - + response.protocolVersion().text() - ) + log.atInfo().setMessage("{} parsed response: {} {} {}") + .addArgument(diagnosticLabel) + .addArgument(() -> response.status().code()) + .addArgument(() -> response.status().reasonPhrase()) + .addArgument(() -> response.protocolVersion().text()) .log(); var httpJsonMessage = parseHeadersIntoMessage(response); ctx.fireChannelRead(httpJsonMessage); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java index f80112116..754ac2a4e 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonBodyAccumulateHandler.java @@ -93,7 +93,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception } } } catch (JacksonException e) { - log.atInfo().setCause(e).setMessage(() -> "Error parsing json body. " + + log.atInfo().setCause(e).setMessage("Error parsing json body. " + "Will pass all payload bytes directly as a ByteBuf within the payload map").log(); jsonWasInvalid = true; parsedJsonObjects.clear(); @@ -139,8 +139,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception context.onTextPayloadParseSucceeded(); } catch (CharacterCodingException e) { context.onTextPayloadParseFailed(); - log.atDebug().setMessage("Payload not valid utf-8, fallback to binary") - .setCause(e).log(); + log.atDebug().setCause(e).setMessage("Payload not valid utf-8, fallback to binary").log(); context.onPayloadSetBinary(); capturedHttpJsonMessage.payload() .put(JsonKeysForHttpMessage.INLINED_BINARY_BODY_DOCUMENT_KEY, diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java index d6a9bc794..109a591f3 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonContentAuthSigner.java @@ -55,12 +55,8 @@ private boolean flushDownstream(ChannelHandlerContext ctx) { public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { boolean messageFlushed = flushDownstream(ctx); if (messageFlushed) { - log.atWarn() - .setMessage( - () -> "Failed to sign message due to handler removed" - + " before the end of the http contents were received" - ) - .log(); + log.atWarn().setMessage("Failed to sign message due to handler removed " + + "before the end of the http contents were received").log(); } super.handlerRemoved(ctx); } @@ -70,11 +66,8 @@ public void channelUnregistered(ChannelHandlerContext ctx) throws Exception { boolean messageFlushed = flushDownstream(ctx); if (messageFlushed) { log.atWarn() - .setMessage( - () -> "Failed to sign message due to channel unregistered" - + " before the end 
of the http contents were received" - ) - .log(); + .setMessage("Failed to sign message due to channel unregistered" + + " before the end of the http contents were received").log(); } super.channelUnregistered(ctx); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java index 0e5a9e837..a68be242b 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettyJsonToByteBufHandler.java @@ -131,9 +131,9 @@ private void writeHeadersIntoByteBufs(ChannelHandlerContext ctx, HttpJsonRequest return; } } catch (Exception e) { - log.atWarn() - .setCause(e) - .setMessage(() -> "writing headers directly to chunks w/ sizes didn't work for " + httpJson) + log.atWarn().setCause(e) + .setMessage("writing headers directly to chunks w/ sizes didn't work for {}") + .addArgument(httpJson) .log(); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java index 62e503b35..e470ef02e 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/datahandlers/http/NettySendByteBufsToPacketHandlerHandler.java @@ -156,33 +156,28 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) { + System.identityHashCode(ctx) ); var bb = (ByteBuf) msg; - log.atTrace().setMessage(() -> "Send bb.refCnt=" + bb.refCnt() + " " + System.identityHashCode(bb)).log(); + log.atTrace().setMessage("Send bb.refCnt={} {}") + .addArgument(bb::refCnt) + .addArgument(() -> System.identityHashCode(bb)) + .log(); // I don't want to capture the *this* object, the preexisting value of the currentFuture field only final var preexistingFutureForCapture = currentFuture; var numBytesToSend = bb.readableBytes(); currentFuture = currentFuture.getDeferredFutureThroughHandle((v, t) -> { try { if (t != null) { - log.atInfo() - .setCause(t) - .setMessage( - () -> "got exception from a previous future that " - + "will prohibit sending any more data to the packetReceiver" - ) - .log(); + log.atInfo().setCause(t).setMessage("got exception from a previous future that " + + "will prohibit sending any more data to the packetReceiver").log(); return TextTrackedFuture.failedFuture(t, () -> "failed previous future"); } else { - log.atTrace() - .setMessage( - () -> "chaining consumingBytes with " - + msg - + " lastFuture=" - + preexistingFutureForCapture - ) + log.atTrace().setMessage("chaining consumingBytes with {} lastFuture={}") + .addArgument(msg) + .addArgument(preexistingFutureForCapture) .log(); var rval = packetReceiver.consumeBytes(bb); - log.atTrace() - .setMessage(() -> "packetReceiver.consumeBytes()=" + rval + " bb.refCnt=" + bb.refCnt()) + log.atTrace().setMessage("packetReceiver.consumeBytes()={} bb.refCnt={}") + .addArgument(rval) + .addArgument(bb::refCnt) .log(); return rval.map( cf -> cf.thenApply(ignore -> false), diff --git 
a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java index 7cf32a854..d90d75781 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/http/retries/OpenSearchDefaultRetry.java @@ -53,7 +53,8 @@ boolean hadNoErrors() { @Override public void channelRead(@NonNull ChannelHandlerContext ctx, @NonNull Object msg) throws Exception { if (msg instanceof HttpContent && errorField == null) { - log.atDebug().setMessage(() -> "body contents: " + ((HttpContent) msg).content().duplicate().toString()).log(); + log.atDebug().setMessage("body contents: {}") + .addArgument(((HttpContent) msg).content().duplicate()).log(); feeder.feedInput(((HttpContent) msg).content().nioBuffer()); consumeInput(); if (msg instanceof LastHttpContent) { @@ -74,7 +75,7 @@ private void consumeInput() throws IOException { token != JsonToken.NOT_AVAILABLE) { JsonToken finalToken = token; - log.atTrace().setMessage(() -> "Got token: " + finalToken).log(); + log.atTrace().setMessage("Got token: {}").addArgument(finalToken).log(); if (token == JsonToken.FIELD_NAME && "errors".equals(parser.getCurrentName())) { parser.nextToken(); errorField = parser.getValueAsBoolean(); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java index 25a898c25..362e0b396 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/KafkaTrafficCaptureSource.java @@ -236,11 +236,10 @@ public List readNextTrafficStreamSynchronously( try { TrafficStream ts = TrafficStream.parseFrom(kafkaRecord.value()); var trafficStreamsSoFar = trafficStreamsRead.incrementAndGet(); - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "Parsed traffic stream #" + trafficStreamsSoFar + ": " + offsetData + " " + ts - ) + log.atTrace().setMessage("Parsed traffic stream #{}: {} {}") + .addArgument(trafficStreamsSoFar) + .addArgument(offsetData) + .addArgument(ts) .log(); var key = new TrafficStreamKeyWithKafkaRecordId(tsk -> { var channelKeyCtx = channelContextManager.retainOrCreateContext(tsk); diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java index f96f02d4f..4a884bf2b 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/OffsetLifecycleTracker.java @@ -56,12 +56,16 @@ Optional removeAndReturnNewHead(long offsetToRemove) { if (offsetToRemove == topCursor) { topCursor = Optional.ofNullable(pQueue.peek()).orElse(cursorHighWatermark + 1); // most recent cursor // was previously popped - log.atDebug() - .setMessage("Commit called for " + offsetToRemove + ", and new topCursor=" + topCursor) + log.atDebug().setMessage("Commit called for {}, and new 
topCursor={}") + .addArgument(offsetToRemove) + .addArgument(topCursor) .log(); return Optional.of(topCursor); } else { - log.atDebug().setMessage("Commit called for " + offsetToRemove + ", but topCursor=" + topCursor).log(); + log.atDebug().setMessage("Commit called for {}, but topCursor={}") + .addArgument(offsetToRemove) + .addArgument(topCursor) + .log(); return Optional.empty(); } } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java index f2f8fc277..acb4aa759 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/kafka/TrackingKafkaConsumer.java @@ -136,7 +136,7 @@ public TrackingKafkaConsumer( @Override public void onPartitionsRevoked(Collection partitions) { if (partitions.isEmpty()) { - log.atDebug().setMessage(() -> this + " revoked no partitions.").log(); + log.atDebug().setMessage("{} revoked no partitions.").addArgument(this).log(); return; } @@ -153,12 +153,9 @@ public void onPartitionsRevoked(Collection partitions) { partitionToOffsetLifecycleTrackerMap.values().stream().mapToInt(OffsetLifecycleTracker::size).sum() ); kafkaRecordsReadyToCommit.set(!nextSetOfCommitsMap.values().isEmpty()); - log.atWarn() - .setMessage( - () -> this - + " partitions revoked for " - + partitions.stream().map(String::valueOf).collect(Collectors.joining(",")) - ) + log.atWarn().setMessage("{} partitions revoked for {}") + .addArgument(this) + .addArgument(() -> partitions.stream().map(String::valueOf).collect(Collectors.joining(","))) .log(); } } @@ -166,7 +163,7 @@ public void onPartitionsRevoked(Collection partitions) { @Override public void onPartitionsAssigned(Collection newPartitions) { if (newPartitions.isEmpty()) { - log.atInfo().setMessage(() -> this + " assigned no new partitions.").log(); + log.atInfo().setMessage("{} assigned no new partitions.").addArgument(this).log(); return; } @@ -180,22 +177,17 @@ public void onPartitionsAssigned(Collection newPartitions) { ) ); log.atInfo() - .setMessage( - () -> this - + " partitions added for " - + newPartitions.stream().map(String::valueOf).collect(Collectors.joining(",")) - ) + .setMessage("{} partitions added for {}") + .addArgument(this) + .addArgument(() -> newPartitions.stream().map(String::valueOf).collect(Collectors.joining(","))) .log(); } } public void close() { log.atInfo() - .setMessage( - () -> "Kafka consumer closing. " - + "Committing (implicitly by Kafka's consumer): " - + nextCommitsToString() - ) + .setMessage("Kafka consumer closing. Committing (implicitly by Kafka's consumer): {}") + .addArgument(this::nextCommitsToString) .log(); kafkaConsumer.close(); } @@ -205,13 +197,9 @@ public Optional getNextRequiredTouch() { var r = kafkaRecordsLeftToCommitEventually.get() == 0 ? Optional.empty() : Optional.of(kafkaRecordsReadyToCommit.get() ? 
Instant.now() : lastTouchTime.plus(keepAliveInterval)); - log.atTrace() - .setMessage( - () -> "returning next required touch at " - + r.map(t -> "" + t).orElse("N/A") - + " from a lastTouchTime of " - + lastTouchTime - ) + log.atTrace().setMessage("returning next required touch at {} from a lastTouchTime of {}") + .addArgument(() -> r.map(Instant::toString).orElse("N/A")) + .addArgument(lastTouchTime) .log(); return r; } @@ -231,14 +219,10 @@ public void touch(ITrafficSourceContexts.IBackPressureBlockContext context) { } catch (IllegalStateException e) { throw e; } catch (RuntimeException e) { - log.atWarn() - .setCause(e) - .setMessage( - "Unable to poll the topic: " - + topic - + " with our Kafka consumer. " - + "Swallowing and awaiting next metadata refresh to try again." - ) + log.atWarn().setCause(e) + .setMessage("Unable to poll the topic: {} with our Kafka consumer. " + + "Swallowing and awaiting next metadata refresh to try again.") + .addArgument(topic) .log(); } finally { resume(); @@ -256,18 +240,14 @@ private void pause() { log.atError() .setCause(e) .setMessage( - () -> "Unable to pause the topic partitions: " - + topic - + ". " - + "The active partitions passed here : " - + activePartitions.stream().map(String::valueOf).collect(Collectors.joining(",")) - + ". " - + "The active partitions as tracked here are: " - + getActivePartitions().stream().map(String::valueOf).collect(Collectors.joining(",")) - + ". " - + "The active partitions according to the consumer: " - + kafkaConsumer.assignment().stream().map(String::valueOf).collect(Collectors.joining(",")) - ) + "Unable to pause the topic partitions: {}. " + + "The active partitions passed here : {}. " + + "The active partitions as tracked here are: {}. " + + "The active partitions according to the consumer: {}") + .addArgument(topic) + .addArgument(() -> activePartitions.stream().map(String::valueOf).collect(Collectors.joining(","))) + .addArgument(() -> getActivePartitions().stream().map(String::valueOf).collect(Collectors.joining(","))) + .addArgument(() -> kafkaConsumer.assignment().stream().map(String::valueOf).collect(Collectors.joining(","))) .log(); } } @@ -279,21 +259,17 @@ private void resume() { } catch (IllegalStateException e) { log.atError() .setCause(e) - .setMessage( - () -> "Unable to resume the topic partitions: " - + topic - + ". " - + "This may not be a fatal error for the entire process as the consumer should eventually" - + " rejoin and rebalance. " - + "The active partitions passed here : " - + activePartitions.stream().map(String::valueOf).collect(Collectors.joining(",")) - + ". " - + "The active partitions as tracked here are: " - + getActivePartitions().stream().map(String::valueOf).collect(Collectors.joining(",")) - + ". " - + "The active partitions according to the consumer: " - + kafkaConsumer.assignment().stream().map(String::valueOf).collect(Collectors.joining(",")) + .setMessage("Unable to resume the topic partitions: {}. " + + "This may not be a fatal error for the entire process as the consumer should eventually" + + " rejoin and rebalance. " + + "The active partitions passed here : {}. " + + "The active partitions as tracked here are: {}. 
" + + "The active partitions according to the consumer: {}") + .addArgument(topic) + .addArgument(() -> activePartitions.stream().map(String::valueOf).collect(Collectors.joining(","))) + .addArgument(() -> getActivePartitions().stream().map(String::valueOf).collect(Collectors.joining(","))) + .addArgument(() -> kafkaConsumer.assignment().stream().map(String::valueOf).collect(Collectors.joining(","))) .log(); } } @@ -328,7 +304,7 @@ private Stream applyBuilder( ); offsetTracker.add(offsetDetails.getOffset()); kafkaRecordsLeftToCommitEventually.incrementAndGet(); - log.atTrace().setMessage(() -> "records in flight=" + kafkaRecordsLeftToCommitEventually.get()).log(); + log.atTrace().setMessage("records in flight={}").addArgument(kafkaRecordsLeftToCommitEventually::get).log(); return builder.apply(offsetDetails, kafkaRecord); }); } @@ -343,44 +319,28 @@ private ConsumerRecords safePollWithSwallowedRuntimeExceptions( records = kafkaConsumer.poll(keepAliveInterval.dividedBy(POLL_TIMEOUT_KEEP_ALIVE_DIVISOR)); } log.atLevel(records.isEmpty() ? Level.TRACE : Level.INFO) - .setMessage( - () -> "Kafka consumer poll has fetched " - + records.count() - + " records. " - + "Records in flight=" - + kafkaRecordsLeftToCommitEventually.get() - ) + .setMessage("Kafka consumer poll has fetched {} records. Records in flight={}") + .addArgument(records::count) + .addArgument(kafkaRecordsLeftToCommitEventually::get) .log(); - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "All positions: {" - + kafkaConsumer.assignment() - .stream() - .map(tp -> tp + ": " + kafkaConsumer.position(tp)) - .collect(Collectors.joining(",")) - + "}" - ) + log.atTrace().setMessage("All positions: {{}}") + .addArgument(() -> + kafkaConsumer.assignment() + .stream() + .map(tp -> tp + ": " + kafkaConsumer.position(tp)) + .collect(Collectors.joining(","))) .log(); - log.atTrace() - .setMessage("{}") - .addArgument( - () -> "All previously COMMITTED positions: {" - + kafkaConsumer.assignment() - .stream() - .map(tp -> tp + ": " + kafkaConsumer.committed(tp)) - .collect(Collectors.joining(",")) - + "}" - ) + log.atTrace().setMessage("All previously COMMITTED positions: {{}}") + .addArgument(() -> kafkaConsumer.assignment() + .stream() + .map(tp -> tp + ": " + kafkaConsumer.committed(tp)) + .collect(Collectors.joining(","))) .log(); return records; } catch (RuntimeException e) { - log.atWarn() - .setCause(e) - .setMessage( - "Unable to poll the topic: {} with our Kafka consumer. " - + "Swallowing and awaiting next metadata refresh to try again." - ) + log.atWarn().setCause(e) + .setMessage("Unable to poll the topic: {} with our Kafka consumer. " + + "Swallowing and awaiting next metadata refresh to try again.") .addArgument(topic) .log(); return new ConsumerRecords<>(Collections.emptyMap()); @@ -395,16 +355,14 @@ ITrafficCaptureSource.CommitResult commitKafkaKey(ITrafficStreamKey streamKey, K if (tracker == null || tracker.consumerConnectionGeneration != kafkaTsk.getGeneration()) { log.atWarn() .setMessage( - () -> "trafficKey's generation (" - + kafkaTsk.getGeneration() - + ") is not current (" - + (Optional.ofNullable(tracker) - .map(t -> "new generation=" + t.consumerConnectionGeneration) - .orElse("Partition unassigned")) - + "). Dropping this commit request since the record would " - + "have been handled again by a current consumer within this process or another. Full key=" - + kafkaTsk - ) + "trafficKey's generation ({}) is not current ({})."
+ + " Dropping this commit request since the record would " + + "have been handled again by a current consumer within this process or another. Full key={}") + .addArgument(kafkaTsk::getGeneration) + .addArgument((Optional.ofNullable(tracker) + .map(t -> "new generation=" + t.consumerConnectionGeneration) + .orElse("Partition unassigned"))) + .addArgument(kafkaTsk) .log(); return ITrafficCaptureSource.CommitResult.IGNORED; } @@ -417,7 +375,7 @@ ITrafficCaptureSource.CommitResult commitKafkaKey(ITrafficStreamKey streamKey, K newHeadValue = tracker.removeAndReturnNewHead(kafkaTsk.getOffset()); return newHeadValue.map(o -> { var v = new OffsetAndMetadata(o); - log.atDebug().setMessage(() -> "Adding new commit " + k + "->" + v + " to map").log(); + log.atDebug().setMessage("Adding new commit {}->{} to map").addArgument(k).addArgument(v).log(); synchronized (commitDataLock) { addKeyContextForEventualCommit(streamKey, kafkaTsk, k); nextSetOfCommitsMap.put(k, v); @@ -471,25 +429,22 @@ private void safeCommit(Supplier com kafkaRecordsLeftToCommitEventually.set( partitionToOffsetLifecycleTrackerMap.values().stream().mapToInt(OffsetLifecycleTracker::size).sum() ); - log.atDebug() - .setMessage(() -> "Done committing now records in flight=" + kafkaRecordsLeftToCommitEventually.get()) + log.atDebug().setMessage("Done committing now records in flight={}") + .addArgument(kafkaRecordsLeftToCommitEventually::get) .log(); } catch (RuntimeException e) { - log.atWarn() - .setCause(e) - .setMessage( - () -> "Error while committing. " + log.atWarn().setCause(e).setMessage("Error while committing. " + "Another consumer may already be processing messages before these commits. " + "Commits ARE NOT being discarded here, with the expectation that the revoked callback " + "(onPartitionsRevoked) will be called. " + "Within that method, commits for unassigned partitions will be discarded. " + "After that, touch() or poll() will trigger another commit attempt." - + "Those calls will occur in the near future if assigned partitions have pending commits." 
- + nextSetOfCommitsMap.entrySet() - .stream() - .map(kvp -> kvp.getKey() + "->" + kvp.getValue()) - .collect(Collectors.joining(",")) + + "Those calls will occur in the near future if assigned partitions have pending commits.{}" ) + .addArgument(() -> nextSetOfCommitsMap.entrySet() + .stream() + .map(kvp -> kvp.getKey() + "->" + kvp.getValue()) + .collect(Collectors.joining(","))) .log(); } finally { if (context != null) { @@ -504,7 +459,7 @@ private static void safeCommitStatic( HashMap nextCommitsMap ) { assert !nextCommitsMap.isEmpty(); - log.atDebug().setMessage(() -> "Committing " + nextCommitsMap).log(); + log.atDebug().setMessage("Committing {}").addArgument(nextCommitsMap).log(); try (var kafkaContext = context.createNewKafkaCommitContext()) { kafkaConsumer.commitSync(nextCommitsMap); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java index b578bd821..8f8b8a683 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/netty/BacksideHttpWatcherHandler.java @@ -61,9 +61,8 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } private void triggerResponseCallbackAndRemoveCallback() { - log.atTrace() - .setMessage(() -> "triggerResponseCallbackAndRemoveCallback, callback=" + this.responseCallback) - .log(); + log.atTrace().setMessage("triggerResponseCallbackAndRemoveCallback, callback={}") + .addArgument(this.responseCallback).log(); doneReadingRequest = true; if (this.responseCallback != null) { // this method may be re-entrant upon calling the callback, so make sure that we don't loop diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java index c50658b0c..6d71823af 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/expiration/BehavioralPolicy.java @@ -60,18 +60,15 @@ public void onNewDataArrivingAfterItsAccumulationHasBeenExpired( + extraGap + (hintString == null ? "" : " " + hintString) ); - log.atInfo() - .setMessage( - () -> "New data has arrived for " - + formatPartitionAndConnectionIds(trafficStreamKey) - + ", but during the processing of this Accumulation object, the Accumulation was expired. " - + "The maximum timestamp that would NOT have triggered expirations of previously observed data is " - + endOfLastWindow - + " and the last timestamp of the reference packet was " - + packetTimestamp - + ". To remedy this, set the minimumGuaranteedLifetime to at least " - + extraGap - ) + log.atInfo().setMessage("New data has arrived for {}," + + " but during the processing of this Accumulation object, the Accumulation was expired. " + + "The maximum timestamp that would NOT have triggered expirations of previously observed data is {}" + + " and the last timestamp of the reference packet was {}. 
" + + " To remedy this, set the minimumGuaranteedLifetime to at least {}") + .addArgument(() -> formatPartitionAndConnectionIds(trafficStreamKey)) + .addArgument(endOfLastWindow) + .addArgument(packetTimestamp) + .addArgument(extraGap) .log(); } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java index b25da73f2..7a2fc3a24 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/BlockingTrafficSource.java @@ -77,25 +77,18 @@ public void stopReadsPast(Instant pointInTime) { var newValue = Utils.setIfLater(stopReadingAtRef, prospectiveBarrier); if (newValue.equals(prospectiveBarrier)) { log.atLevel(Level.TRACE) - .setMessage( - () -> "Releasing the block on readNextTrafficStreamChunk and set" - + " the new stopReadingAtRef=" - + newValue - ) + .setMessage("Releasing the block on readNextTrafficStreamChunk and set the new stopReadingAtRef={}") + .addArgument(newValue) .log(); // No reason to signal more than one reader. We don't support concurrent reads with the current contract readGate.drainPermits(); readGate.release(); } else { log.atTrace() - .setMessage( - () -> "stopReadsPast: " - + pointInTime - + " [buffer=" - + prospectiveBarrier - + "] didn't move the cursor because the value was already at " - + newValue - ) + .setMessage("stopReadsPast: {} [buffer={}] didn't move the cursor because the value was already at {}") + .addArgument(pointInTime) + .addArgument(prospectiveBarrier) + .addArgument(newValue) .log(); } } @@ -127,11 +120,8 @@ public CompletableFuture> readNextTrafficStreamChunk .map(TrafficStreamUtils::instantFromProtoTimestamp) .orElse(Instant.EPOCH); Utils.setIfLater(lastTimestampSecondsRef, maxLocallyObservedTimestamp); - log.atTrace() - .setMessage( - () -> "end of readNextTrafficStreamChunk trigger...lastTimestampSecondsRef=" - + lastTimestampSecondsRef.get() - ) + log.atTrace().setMessage("end of readNextTrafficStreamChunk trigger...lastTimestampSecondsRef={}") + .addArgument(lastTimestampSecondsRef::get) .log(); }); } @@ -147,10 +137,9 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) if (stopReadingAtRef.get().equals(Instant.EPOCH)) { return null; } - log.atTrace() - .setMessage( - () -> "stopReadingAtRef=" + stopReadingAtRef + " lastTimestampSecondsRef=" + lastTimestampSecondsRef - ) + log.atTrace().setMessage("stopReadingAtRef={} lastTimestampSecondsRef={}") + .addArgument(stopReadingAtRef) + .addArgument(lastTimestampSecondsRef) .log(); ITrafficSourceContexts.IBackPressureBlockContext blockContext = null; while (stopReadingAtRef.get().isBefore(lastTimestampSecondsRef.get())) { @@ -158,10 +147,9 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) blockContext = readContext.createBackPressureContext(); } try { - log.atTrace() - .setMessage("blocking until signaled to read the next chunk last={} stop={}") - .addArgument(lastTimestampSecondsRef.get()) - .addArgument(stopReadingAtRef.get()) + log.atTrace().setMessage("blocking until signaled to read the next chunk last={} stop={}") + .addArgument(lastTimestampSecondsRef::get) + .addArgument(stopReadingAtRef::get) .log(); var nextTouchOp = underlyingSource.getNextRequiredTouch(); if 
(nextTouchOp.isEmpty()) { @@ -173,16 +161,10 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) var nextInstant = nextTouchOp.get(); final var nowTime = Instant.now(); var waitIntervalMs = Duration.between(nowTime, nextInstant).toMillis(); - log.atDebug() - .setMessage( - () -> "Next touch at " - + nextInstant - + " ... in " - + waitIntervalMs - + "ms (now=" - + nowTime - + ")" - ) + log.atDebug().setMessage("Next touch at {} ... in {}ms (now={})") + .addArgument(nextInstant) + .addArgument(waitIntervalMs) + .addArgument(nowTime) .log(); if (waitIntervalMs <= 0) { underlyingSource.touch(blockContext); @@ -190,17 +172,17 @@ private Void blockIfNeeded(ITrafficSourceContexts.IReadChunkContext readContext) // if this doesn't succeed, we'll loop around & likely do a touch, then loop around again. // if it DOES succeed, we'll loop around and make sure that there's not another reason to stop log.atTrace() - .setMessage(() -> "acquiring readGate semaphore with timeout=" + waitIntervalMs) - .log(); + .setMessage("acquiring readGate semaphore with timeout={}") + .addArgument(waitIntervalMs).log(); try (var waitContext = blockContext.createWaitForSignalContext()) { var didAcquire = readGate.tryAcquire(waitIntervalMs, TimeUnit.MILLISECONDS); - log.atTrace().setMessage("semaphore ") + log.atTrace().setMessage("semaphore {}") .addArgument(() -> (didAcquire ? "" : "not ") + "acquired").log(); } } } } catch (InterruptedException e) { - log.atWarn().setCause(e).log("Interrupted while waiting to read more data"); + log.atWarn().setCause(e).setMessage("Interrupted while waiting to read more data").log(); Thread.currentThread().interrupt(); break; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java index de95adeab..dcd0f7de3 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/InputStreamOfTraffic.java @@ -39,9 +39,9 @@ public InputStreamOfTraffic(RootReplayerContext context, InputStream inputStream try { isr.close(); } catch (Exception e2) { - log.atError().setMessage("Caught exception while closing InputStreamReader that " + + log.atError().setCause(e2).setMessage("Caught exception while closing InputStreamReader that " + "was in response to an earlier thrown exception. 
Swallowing the inner exception and " + - "throwing the original one.").setCause(e2).log(); + "throwing the original one.").log(); } throw e; } diff --git a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java index 31c272e8d..04879cd76 100644 --- a/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java +++ b/TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/traffic/source/TrafficStreamLimiter.java @@ -45,20 +45,15 @@ private void consumeFromQueue() { try { while (!stopped.get()) { workItem = workQueue.take(); - log.atDebug() - .setMessage(() -> "liveTrafficStreamCostGate.permits: {} acquiring: {}") - .addArgument(liveTrafficStreamCostGate.availablePermits()) + log.atDebug().setMessage("liveTrafficStreamCostGate.permits: {} acquiring: {}") + .addArgument(liveTrafficStreamCostGate::availablePermits) .addArgument(workItem.cost) .log(); liveTrafficStreamCostGate.acquire(workItem.cost); WorkItem finalWorkItem = workItem; - log.atDebug() - .setMessage( - () -> "Acquired liveTrafficStreamCostGate (available=" - + liveTrafficStreamCostGate.availablePermits() - + ") to process " - + finalWorkItem.context - ) + log.atDebug().setMessage("Acquired liveTrafficStreamCostGate (available={}) to process {}") + .addArgument(liveTrafficStreamCostGate::availablePermits) + .addArgument(finalWorkItem.context) .log(); workItem.task.accept(workItem); workItem = null; @@ -66,13 +61,9 @@ private void consumeFromQueue() { } catch (InterruptedException e) { if (!stopped.get()) { WorkItem finalWorkItem = workItem; - log.atError() - .setMessage( - () -> "consumeFromQueue() was interrupted with " - + (finalWorkItem != null ? "an active task and " : "") - + workQueue.size() - + " enqueued items" - ) + log.atError().setMessage("consumeFromQueue() was interrupted with {}{} enqueued items") + .addArgument(() -> (finalWorkItem != null ? 
"an active task and " : "")) + .addArgument(workQueue::size) .log(); } throw e; @@ -88,15 +79,10 @@ public WorkItem queueWork(int cost, IHttpTransactionContext context, @NonNull Co public void doneProcessing(@NonNull WorkItem workItem) { liveTrafficStreamCostGate.release(workItem.cost); - log.atDebug() - .setMessage( - () -> "released " - + workItem.cost - + " liveTrafficStreamCostGate.availablePermits=" - + liveTrafficStreamCostGate.availablePermits() - + " for " - + workItem.context - ) + log.atDebug().setMessage("released {} liveTrafficStreamCostGate.availablePermits={} for {}") + .addArgument(workItem.cost) + .addArgument(liveTrafficStreamCostGate::availablePermits) + .addArgument(workItem.context) .log(); } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java index d99be87b6..c8bd5dd96 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/BlockingTrafficSourceTest.java @@ -88,7 +88,7 @@ private static class TestTrafficCaptureSource implements ISimpleTrafficCaptureSo public CompletableFuture> readNextTrafficStreamChunk( Supplier contextSupplier ) { - log.atTrace().setMessage(() -> "Test.readNextTrafficStreamChunk.counter=" + counter).log(); + log.atTrace().setMessage("Test.readNextTrafficStreamChunk.counter={}").addArgument(counter).log(); var i = counter.getAndIncrement(); if (i >= nStreamsToCreate) { return CompletableFuture.failedFuture(new EOFException()); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java index 84f9669d2..0e59b9d57 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/ExhaustiveCapturedTrafficToHttpTransactionAccumulatorTest.java @@ -65,9 +65,8 @@ public static Arguments[] generateAllTestsAndConfirmComplete(IntStream seedStrea .toArray(Arguments[]::new); // Assertions.assertTrue(possibilitiesLeftToTest.isEmpty()); - log.atInfo() - .setMessage(() -> "Sufficient random seeds to generate a full cover of tests:{}") - .addArgument(seedsThatOfferUniqueTestCases.toString()) + log.atInfo().setMessage("Sufficient random seeds to generate a full cover of tests:{}") + .addArgument(seedsThatOfferUniqueTestCases) .log(); return argsArray; } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java index 86f387fcf..e5b8140ad 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/RequestSenderOrchestratorTest.java @@ -61,11 +61,15 @@ public TrackedFuture consumeBytes(ByteBuf nextRequestPacket) { return new TextTrackedFuture<>(CompletableFuture.supplyAsync(() -> { try { lastCheckIsReady.release(); - 
log.atDebug() - .setMessage(() -> "trying to acquire semaphore for packet #" + index + " and id=" + id) + log.atDebug().setMessage("trying to acquire semaphore for packet #{} and id={}") + .addArgument(index) + .addArgument(id) .log(); consumeIsReady.acquire(); - log.atDebug().setMessage(() -> "Acquired semaphore for packet #" + index + " and id=" + id).log(); + log.atDebug().setMessage("Acquired semaphore for packet #{} and id={}") + .addArgument(index) + .addArgument(id) + .log(); } catch (InterruptedException e) { throw Lombok.sneakyThrow(e); } @@ -79,7 +83,8 @@ public TrackedFuture finalizeRequest() { return new TextTrackedFuture<>(CompletableFuture.supplyAsync(() -> { try { lastCheckIsReady.release(); - log.atDebug().setMessage(() -> "trying to acquire semaphore for finalize and id=" + id).log(); + log.atDebug().setMessage("trying to acquire semaphore for finalize and id={}") + .addArgument(id).log(); consumeIsReady.acquire(); } catch (InterruptedException e) { throw Lombok.sneakyThrow(e); @@ -159,16 +164,12 @@ public void testFutureGraphBuildout() throws Exception { int finalI = i; int finalJ = j; log.atInfo() - .setMessage( - () -> "cf @ " - + finalI - + "," - + finalJ - + " =\n" - + reversedScheduledRequests.stream() - .map(sr -> getParentsDiagnosticString(sr, "")) - .collect(Collectors.joining("\n---\n")) - ) + .setMessage("cf @ {}, {} =\n{}") + .addArgument(finalI) + .addArgument(finalJ) + .addArgument(() -> reversedScheduledRequests.stream() + .map(sr -> getParentsDiagnosticString(sr, "")) + .collect(Collectors.joining("\n---\n"))) .log(); pktConsumer.consumeIsReady.release(); } @@ -281,14 +282,14 @@ public void testThatSchedulingWorks() throws Exception { body.toString(StandardCharsets.UTF_8) ); } catch (Throwable e) { - log.atError().setMessage(()->"caught exception(1)").setCause(e).log(); + log.atError().setCause(e).setMessage("caught exception(1)").log(); throw e; } } closeFuture.get(); log.error("Done running loop"); } catch (Throwable e) { - log.atError().setMessage(()->"caught exception(2)").setCause(e).log(); + log.atError().setCause(e).setMessage("caught exception(2)").log(); throw e; } } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java index fc28e9d74..e0b4eeb8d 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/datahandlers/NettyPacketToHttpConsumerTest.java @@ -141,7 +141,7 @@ public void testThatTestSetupIsCorrect() throws Exception { new String(response.payloadBytes, StandardCharsets.UTF_8) ); } catch (Throwable t) { - log.atError().setMessage(() -> "Error=").setCause(t).log(); + log.atError().setCause(t).setMessage("Error=").log(); } } } @@ -227,7 +227,7 @@ private void testPeerResets( NettyPacketToHttpConsumerTest::makeResponseContext ) ) { - log.atError().setMessage("Got port " + testServer.port).log(); + log.atError().setMessage("Got port {}").addArgument(testServer.port).log(); var sslContext = !useTls ? 
null : SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build(); @@ -422,7 +422,7 @@ public void testResponseTakesLongerThanTimeout(boolean useTls) throws Exception var timeShifter = new TimeShifter(); var firstRequestTime = Instant.now(); timeShifter.setFirstTimestamp(firstRequestTime); - log.atInfo().setMessage("Initial Timestamp: " + firstRequestTime).log(); + log.atInfo().setMessage("Initial Timestamp: {}").addArgument(firstRequestTime).log(); var replayEngineFactory = new ReplayEngineFactory(responseTimeout, new TestFlowController(), @@ -452,13 +452,10 @@ public void testResponseTakesLongerThanTimeout(boolean useTls) throws Exception public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) throws Exception { var responseTimeout = Duration.ofMillis(100); var timeBetweenRequests = responseTimeout.plus(Duration.ofMillis(10)); - log.atInfo() - .setMessage( - "Running testTimeBetweenRequestsLongerThanResponseTimeout with responseTimeout " - + responseTimeout - + " and timeBetweenRequests" - + timeBetweenRequests - ) + log.atInfo().setMessage("Running testTimeBetweenRequestsLongerThanResponseTimeout with responseTimeout {}" + + " and timeBetweenRequests {}") + .addArgument(responseTimeout) + .addArgument(timeBetweenRequests) .log(); try ( var testServer = SimpleNettyHttpServer.makeServer( @@ -483,7 +480,7 @@ public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) thr var timeShifter = new TimeShifter(); var firstRequestTime = Instant.now(); timeShifter.setFirstTimestamp(firstRequestTime); - log.atInfo().setMessage("Initial Timestamp: " + firstRequestTime).log(); + log.atInfo().setMessage("Initial Timestamp: {}").addArgument(firstRequestTime).log(); var replayEngineFactory = new ReplayEngineFactory(responseTimeout, new TestFlowController(), timeShifter @@ -491,7 +488,7 @@ public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) thr int i = 0; while (true) { var ctx = rootContext.getTestConnectionRequestContext("TEST", i); - log.atInfo().setMessage("Starting transformAndSendRequest for request " + i).log(); + log.atInfo().setMessage("Starting transformAndSendRequest for request {}").addArgument(i).log(); var tr = new RequestTransformerAndSender<>(new NoRetryEvaluatorFactory()); var requestFinishFuture = tr.transformAndSendRequest( @@ -504,7 +501,7 @@ public void testTimeBetweenRequestsLongerThanResponseTimeout(boolean useTls) thr () -> Stream.of(EXPECTED_REQUEST_STRING.getBytes(StandardCharsets.UTF_8))); var maxTimeToWaitForTimeoutOrResponse = Duration.ofSeconds(10); var aggregatedResponse = requestFinishFuture.get(maxTimeToWaitForTimeoutOrResponse); - log.atInfo().setMessage("RequestFinishFuture finished for request " + i).log(); + log.atInfo().setMessage("RequestFinishFuture finished for request {}").addArgument(i).log(); Assertions.assertNull(aggregatedResponse.getError()); var responseAsString = getResponsePacketsAsString(aggregatedResponse); Assertions.assertEquals(EXPECTED_RESPONSE_STRING, responseAsString); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java index ff3c46c43..8719968fa 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java +++ 
b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/FullTrafficReplayerTest.java @@ -252,10 +252,9 @@ public void fullTestWithThrottledStart() throws Throwable { ); var numExpectedRequests = streamAndSizes.numHttpTransactions; var trafficStreams = streamAndSizes.stream.collect(Collectors.toList()); - log.atInfo() - .setMessage( - () -> trafficStreams.stream() - .map(ts -> TrafficStreamUtils.summarizeTrafficStream(ts)) + log.atInfo().setMessage("{}") + .addArgument(() -> trafficStreams.stream() + .map(TrafficStreamUtils::summarizeTrafficStream) .collect(Collectors.joining("\n")) ) .log(); @@ -376,10 +375,9 @@ public void fullTestWithRestarts(int testSize, boolean randomize) throws Throwab ); var numExpectedRequests = streamAndSizes.numHttpTransactions; var trafficStreams = streamAndSizes.stream.collect(Collectors.toList()); - log.atInfo() - .setMessage( - () -> trafficStreams.stream() - .map(ts -> TrafficStreamUtils.summarizeTrafficStream(ts)) + log.atInfo().setMessage("{}") + .addArgument(() -> trafficStreams.stream() + .map(TrafficStreamUtils::summarizeTrafficStream) .collect(Collectors.joining("\n")) ) .log(); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java index 21fc6a585..0d139e7f6 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/KafkaRestartingTrafficReplayerTest.java @@ -104,11 +104,10 @@ public void fullTest(int testSize, boolean randomize) throws Throwable { randomize ); var trafficStreams = streamAndConsumer.stream.collect(Collectors.toList()); - log.atInfo() - .setMessage( - () -> trafficStreams.stream() - .map(TrafficStreamUtils::summarizeTrafficStream) - .collect(Collectors.joining("\n")) + log.atInfo().setMessage("{}") + .addArgument(() -> trafficStreams.stream() + .map(TrafficStreamUtils::summarizeTrafficStream) + .collect(Collectors.joining("\n")) ) .log(); @@ -145,7 +144,7 @@ private KafkaConsumer buildKafkaConsumer() { ); kafkaConsumerProps.setProperty("max.poll.interval.ms", DEFAULT_POLL_INTERVAL_MS + ""); var kafkaConsumer = new KafkaConsumer(kafkaConsumerProps); - log.atInfo().setMessage(() -> "Just built KafkaConsumer=" + kafkaConsumer).log(); + log.atInfo().setMessage("Just built KafkaConsumer={}").addArgument(kafkaConsumer).log(); return kafkaConsumer; } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java index 79885d1f7..e49771473 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/e2etests/TrafficReplayerRunner.java @@ -142,15 +142,12 @@ public static void runReplayer( } catch (TrafficReplayer.TerminationException e) { log.atLevel(e.originalCause instanceof FabricatedErrorToKillTheReplayer ? 
Level.INFO : Level.ERROR) .setCause(e.originalCause) - .setMessage(() -> "broke out of the replayer, with this shutdown reason") + .setMessage("broke out of the replayer, with this shutdown reason") .log(); log.atLevel(e.immediateCause == null ? Level.INFO : Level.ERROR) .setCause(e.immediateCause) - .setMessage( - () -> "broke out of the replayer, with the shutdown cause=" - + e.originalCause - + " and this immediate reason" - ) + .setMessage("broke out of the replayer, with the shutdown cause={} and this immediate reason") + .addArgument(e.originalCause) .log(); FabricatedErrorToKillTheReplayer killSignalError = e.originalCause instanceof FabricatedErrorToKillTheReplayer @@ -187,25 +184,22 @@ public static void runReplayer( } } log.atInfo() - .setMessage(() -> "completely received request keys=\n{}") - .addArgument(completelyHandledItems.keySet().stream().sorted().collect(Collectors.joining("\n"))) + .setMessage("completely received request keys=\n{}") + .addArgument(() -> completelyHandledItems.keySet().stream().sorted().collect(Collectors.joining("\n"))) .log(); var skippedPerRun = IntStream.range(0, receivedPerRun.size()) .map(i -> totalUniqueEverReceivedSizeAfterEachRun.get(i) - receivedPerRun.get(i)) .toArray(); - log.atInfo() - .setMessage( - () -> "Summary: (run #, uniqueSoFar, receivedThisRun, skipped)\n" - + IntStream.range(0, receivedPerRun.size()) - .mapToObj( - i -> new StringJoiner(", ").add("" + i) - .add("" + totalUniqueEverReceivedSizeAfterEachRun.get(i)) - .add("" + receivedPerRun.get(i)) - .add("" + skippedPerRun[i]) - .toString() - ) - .collect(Collectors.joining("\n")) - ) + log.atInfo().setMessage("Summary: (run #, uniqueSoFar, receivedThisRun, skipped)\n{}") + .addArgument(() -> IntStream.range(0, receivedPerRun.size()) + .mapToObj( + i -> new StringJoiner(", ").add("" + i) + .add("" + totalUniqueEverReceivedSizeAfterEachRun.get(i)) + .add("" + receivedPerRun.get(i)) + .add("" + skippedPerRun[i]) + .toString() + ) + .collect(Collectors.joining("\n"))) .log(); var skippedPerRunDiffs = IntStream.range(0, receivedPerRun.size() - 1) .map(i -> (skippedPerRun[i] <= skippedPerRun[i + 1]) ? 1 : 0) diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java index 416686740..dacc7fec3 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/http/retries/HttpRetryTest.java @@ -171,7 +171,7 @@ public void testConnectionFailuresNeverGiveUp() throws Exception { var e = Assertions.assertThrows(Exception.class, f::get); var shutdownResult = ccpShutdownFuture.get(); - log.atInfo().setCause(e).setMessage(() -> "exception: ").log(); + log.atInfo().setCause(e).setMessage("exception: ").log(); // doubly-nested ExecutionException. 
Once for the get() call here and once for the work done in submit, // which wraps the scheduled request's future Assertions.assertInstanceOf(IllegalStateException.class, e.getCause().getCause()); @@ -227,7 +227,7 @@ public void testMalformedResponseFailuresNeverGiveUp() throws Exception { var lastResponse = responseList.get(responseList.size()-1).getRawResponse(); Assertions.assertNotNull(lastResponse); Assertions.assertEquals(200, lastResponse.status().code()); - log.atInfo().setMessage(()->"responses: " + responses).log(); + log.atInfo().setMessage("responses: {}").addArgument(responses).log(); var retries = checkHttpRetryConsistency(rootContext); Assertions.assertTrue(retries > 0); var metrics = rootContext.inMemoryInstrumentationBundle.getFinishedMetrics(); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java index f5f5ffe13..841e4e4e8 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaCommitsWorkBetweenLongPollsTest.java @@ -44,7 +44,7 @@ private KafkaConsumer buildKafkaConsumer() { ); kafkaConsumerProps.setProperty("max.poll.interval.ms", DEFAULT_POLL_INTERVAL_MS + ""); var kafkaConsumer = new KafkaConsumer(kafkaConsumerProps); - log.atInfo().setMessage(() -> "Just built KafkaConsumer=" + kafkaConsumer).log(); + log.atInfo().setMessage("Just built KafkaConsumer={}").addArgument(kafkaConsumer).log(); return kafkaConsumer; } diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java index 4bfad92b2..a87c75c0a 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaKeepAliveTests.java @@ -141,27 +141,27 @@ public void testBlockedReadsAndBrokenCommitsDontCauseReordering() throws Excepti keysReceived = new ArrayList<>(); log.info("re-establish a client connection so that the following commit will work"); - log.atInfo().setMessage(() -> "1 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage("1 ...{}").addArgument(this::renderNextCommitsAsString).log(); readNextNStreams(rootContext, trafficSource, keysReceived, 0, 1); - log.atInfo().setMessage(() -> "2 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage("2 ...{}").addArgument(this::renderNextCommitsAsString).log(); log.info("wait long enough to fall out of the group again"); Thread.sleep(2 * MAX_POLL_INTERVAL_MS); var keysReceivedUntilDrop2 = keysReceived; keysReceived = new ArrayList<>(); - log.atInfo().setMessage(() -> "re-establish... 3 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage("re-establish... 3 ...{}").addArgument(this::renderNextCommitsAsString).log(); readNextNStreams(rootContext, trafficSource, keysReceived, 0, 1); trafficSource.commitTrafficStream(keysReceivedUntilDrop1.get(1)); - log.atInfo().setMessage(() -> "re-establish... 4 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage("re-establish... 
4 ...{}").addArgument(this::renderNextCommitsAsString).log(); readNextNStreams(rootContext, trafficSource, keysReceived, 1, 1); - log.atInfo().setMessage(() -> "5 ..." + renderNextCommitsAsString()).log(); + log.atInfo().setMessage("5 ...{}").addArgument(this::renderNextCommitsAsString).log(); Thread.sleep(2 * MAX_POLL_INTERVAL_MS); var keysReceivedUntilDrop3 = keysReceived; keysReceived = new ArrayList<>(); readNextNStreams(rootContext, trafficSource, keysReceived, 0, 3); - log.atInfo().setMessage(() -> "6 ..." + kafkaSource.trackingKafkaConsumer.nextCommitsToString()).log(); + log.atInfo().setMessage("6 ...{}").addArgument(kafkaSource.trackingKafkaConsumer::nextCommitsToString).log(); trafficSource.close(); } @@ -182,7 +182,7 @@ private static void readNextNStreams( var trafficStreams = kafkaSource.readNextTrafficStreamChunk(rootContext::createReadChunkContext).get(); trafficStreams.forEach(ts -> { var tsk = ts.getKey(); - log.atInfo().setMessage(() -> "checking for " + tsk).log(); + log.atInfo().setMessage("checking for {}").addArgument(tsk).log(); Assertions.assertFalse(keysReceived.contains(tsk)); keysReceived.add(tsk); }); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java index 8d2c2b1b4..575962f26 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/kafka/KafkaTestUtils.java @@ -97,11 +97,10 @@ var record = new ProducerRecord(TEST_TOPIC_NAME, recordId, trafficStream.toByteA ); log.info("sending record with trafficStream=" + tsKeyStr); var sendFuture = kafkaProducer.send(record, (metadata, exception) -> { - log.atInfo() - .setCause(exception) - .setMessage( - () -> "completed send of TrafficStream with key=" + tsKeyStr + " metadata=" + metadata - ) + log.atInfo().setCause(exception) + .setMessage("completed send of TrafficStream with key={} metadata={}") + .addArgument(tsKeyStr) + .addArgument(metadata) .log(); }); var recordMetadata = sendFuture.get(); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java index a75accaa6..42bd67190 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/OnlineRadixSorterTest.java @@ -27,7 +27,7 @@ private static String add( if (m != null) { m.put(v, future); } - log.atInfo().setMessage(() -> "after adding work... " + future).log(); + log.atInfo().setMessage("after adding work... {}").addArgument(future).log(); return stringify(receivedItems.stream()); } @@ -47,8 +47,8 @@ void testOnlineRadixSorter_outOfOrder() { Assertions.assertEquals("", add(radixSorter, futureMap, receiverList, 3)); Assertions.assertEquals("", add(radixSorter, futureMap, receiverList, 4)); Assertions.assertEquals("1", add(radixSorter, futureMap, receiverList, 1)); - log.atInfo().setMessage(() -> "after adding work for '1'... future[3]=" + futureMap.get(3)).log(); - log.atInfo().setMessage(() -> "after adding work for '1'... future[4]=" + futureMap.get(4)).log(); + log.atInfo().setMessage("after adding work for '1'... 
future[3]={}").addArgument(()->futureMap.get(3)).log(); + log.atInfo().setMessage("after adding work for '1'... future[4]={}").addArgument(() -> futureMap.get(4)).log(); receiverList.clear(); Assertions.assertEquals("2,3,4", add(radixSorter, futureMap, receiverList, 2)); receiverList.clear(); diff --git a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java index f55c9442c..4f5898212 100644 --- a/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java +++ b/TrafficCapture/trafficReplayer/src/test/java/org/opensearch/migrations/replay/util/TrackedFutureTest.java @@ -23,7 +23,7 @@ public void test() throws Exception { try { observerSemaphore.release(); tfSemaphore.acquire(); - log.atInfo().setMessage(() -> "tf[" + finalI + "]" + lastTf).log(); + log.atInfo().setMessage("tf[{}]{}").addArgument(finalI).addArgument(lastTf).log(); } catch (InterruptedException e) { throw Lombok.sneakyThrow(e); } @@ -37,8 +37,8 @@ public void test() throws Exception { tfSemaphore.release(); Thread.sleep(10); int finalI = i; - log.atInfo().setMessage(() -> "top tf after " + finalI + " releases=" + finalTf).log(); + log.atInfo().setMessage("top tf after {} releases={}").addArgument(finalI).addArgument(finalTf).log(); } - log.atInfo().setMessage(() -> "final tf after any ancestor culls=" + finalTf).log(); + log.atInfo().setMessage("final tf after any ancestor culls={}").addArgument(finalTf).log(); } } diff --git a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java index 8ae4953ec..c78a03342 100644 --- a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java +++ b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/TestCapturePacketToHttpHandler.java @@ -44,20 +44,20 @@ public TestCapturePacketToHttpHandler( @Override public TrackedFuture consumeBytes(ByteBuf nextRequestPacket) { numConsumes.incrementAndGet(); - log.atDebug().setMessage(()->"incoming buffer refcnt=" + nextRequestPacket.refCnt()).log(); + log.atDebug().setMessage("incoming buffer refcnt={}").addArgument(nextRequestPacket::refCnt).log(); var duplicatedPacket = nextRequestPacket.retainedDuplicate(); return new TrackedFuture<>(CompletableFuture.runAsync(() -> { try { - log.atDebug().setMessage(()->"Running async future for " + nextRequestPacket).log(); + log.atDebug().setMessage("Running async future for {}").addArgument(nextRequestPacket).log(); Thread.sleep(consumeDuration.toMillis()); - log.atDebug().setMessage(()->"woke up from sleeping for " + nextRequestPacket).log(); + log.atDebug().setMessage("woke up from sleeping for {}").addArgument(nextRequestPacket).log(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw Lombok.sneakyThrow(e); } try { - log.atDebug() - .setMessage(()->"At the time of committing the buffer, refcnt=" + duplicatedPacket.refCnt()).log(); + log.atDebug().setMessage("At the time of committing the buffer, refcnt={}") + .addArgument(duplicatedPacket::refCnt).log(); duplicatedPacket.readBytes(byteArrayOutputStream, nextRequestPacket.readableBytes()); duplicatedPacket.release(); } catch (IOException e) { diff 
--git a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java index 0cb4cba90..61d58b16b 100644 --- a/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java +++ b/TrafficCapture/trafficReplayer/src/testFixtures/java/org/opensearch/migrations/replay/traffic/generator/ExhaustiveTrafficStreamGenerator.java @@ -181,7 +181,7 @@ public static int classifyTrafficStream(HashSet possibilitiesLeftToTest getTypeFromObservation(ssl.get(ssl.size() - 1), outgoingLastType), ssl.size() ); - log.atTrace().setMessage(() -> "classification=" + classificationToString(type)).log(); + log.atTrace().setMessage("classification={}").addArgument(() -> classificationToString(type)).log(); previousObservationType = outgoingLastType; counter += possibilitiesLeftToTest.remove(type) ? 1 : 0; } @@ -274,9 +274,8 @@ private static TrafficStream[] fillCommandsAndSizesForSeed( var r2 = new Random(rSeed); var bufferSize = r2.nextInt(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE) + MIN_BUFFER_SIZE; final var bufferBound = (int) (Math.abs(r2.nextGaussian()) * ((MAX_BUFFER_SIZE_MULTIPLIER * bufferSize))) + 1; - log.atTrace() - .setMessage( - () -> String.format( + log.atTrace().setMessage("{}") + .addArgument(() -> String.format( "bufferSize=%d bufferBound=%d maxPossibleReads=%d maxPossibleWrites=%d", bufferSize, bufferBound, @@ -357,9 +356,8 @@ public static StreamAndExpectedSizes generateStreamAndSumOfItsTransactions( : generateAllIndicativeRandomTrafficStreamsAndSizes(rootContext); var testCaseArr = generatedCases.toArray(RandomTrafficStreamAndTransactionSizes[]::new); log.atInfo() - .setMessage( - () -> "test case array = \n" - + Arrays.stream(testCaseArr) + .setMessage("test case array = \n{}") + .addArgument(() -> Arrays.stream(testCaseArr) .flatMap(tc -> Arrays.stream(tc.trafficStreams).map(TrafficStreamUtils::summarizeTrafficStream)) .collect(Collectors.joining("\n")) ) diff --git a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java index 4f0dab4dc..347229b07 100644 --- a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java +++ b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/CommonScopedMetricInstruments.java @@ -71,10 +71,9 @@ public CommonScopedMetricInstruments(Meter meter, ScopeLabels stockMetricLabels, private static List getBuckets(double firstBucketSize, double lastBucketCeiling) { var buckets = getExponentialBucketsBetween(firstBucketSize, lastBucketCeiling); log.atTrace() - .setMessage( - () -> "Setting buckets to " - + buckets.stream().map(x -> "" + x).collect(Collectors.joining(",", "[", "]")) - ) + .setMessage("Setting buckets to {}") + .addArgument(() -> buckets.stream().map(x -> "" + x) + .collect(Collectors.joining(",", "[", "]"))) .log(); return buckets; } diff --git a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java index f877f73af..4fc51c1fc 100644 --- a/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java +++ 
b/coreUtilities/src/main/java/org/opensearch/migrations/tracing/RootOtelContext.java
@@ -95,13 +95,9 @@ public static OpenTelemetry initializeOpenTelemetryWithCollectorOrAsNoop(
             .map(endpoint -> initializeOpenTelemetryForCollector(endpoint, serviceName, instanceName))
             .orElseGet(() -> {
                 if (serviceName != null) {
-                    log.atWarn()
-                        .setMessage(
-                            "Collector endpoint=null, so serviceName parameter '"
-                                + serviceName
-                                + "' is being ignored since a no-op OpenTelemetry object is being created"
-                        )
-                        .log();
+                    log.atWarn().setMessage("Collector endpoint=null, so serviceName parameter '{}'"
+                        + " is being ignored since a no-op OpenTelemetry object is being created")
+                        .addArgument(serviceName).log();
                 }
                 return initializeNoopOpenTelemetry();
             });
diff --git a/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java b/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java
index 6e4957755..f244fdfe0 100644
--- a/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java
+++ b/coreUtilities/src/main/java/org/opensearch/migrations/utils/ProcessHelpers.java
@@ -15,7 +15,7 @@ public static String getNodeInstanceName() {
         var nodeId = Optional.of("ECS_TASK_ID").map(System::getenv)
             .or(() -> Optional.of("HOSTNAME").map(System::getenv))
             .orElse(DEFAULT_NODE_ID);
-        log.atInfo().setMessage(() -> "getNodeInstanceName()=" + nodeId).log();
+        log.atInfo().setMessage("getNodeInstanceName()={}").addArgument(nodeId).log();
         return nodeId;
     }
 }
diff --git a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java
index 99d2cf0e5..6c5cb5bdc 100644
--- a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java
+++ b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/BacktracingContextTracker.java
@@ -34,11 +34,8 @@ public void onCreated(IScopedInstrumentationAttributes ctx) {
         if (priorValue != null) {
             var priorKey = scopedContextToCallDetails.entrySet().stream().findFirst().get().getKey();
             if (priorKey.equals(ctx)) {
-                log.atError()
-                    .setMessage(
-                        () -> "Trying to re-add the same context (" + ctx + ") back into this context tracker"
-                    )
-                    .log();
+                log.atError().setMessage("Trying to re-add the same context ({}) back into this context tracker")
+                    .addArgument(ctx).log();
             }
         }
         assert priorValue == null;
@@ -55,8 +52,8 @@ public void onClosed(IScopedInstrumentationAttributes ctx) {
         assert oldCallDetails != null;
         final var oldE = oldCallDetails.closeStackException;
         if (oldE != null) {
-            log.atError().setCause(newExceptionStack).setMessage(() -> "Close is being called here").log();
-            log.atError().setCause(oldE).setMessage(() -> "... but close was already called here").log();
+            log.atError().setCause(newExceptionStack).setMessage("Close is being called here").log();
+            log.atError().setCause(oldE).setMessage("... but close was already called here").log();
             assert oldE == null;
         }
         oldCallDetails.closeStackException = new ExceptionForStackTracingOnly();
diff --git a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java
index 6d4b7c096..27cb163ed 100644
--- a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java
+++ b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/InMemoryInstrumentationBundle.java
@@ -123,9 +123,13 @@ public List getMetricsUntil(
                 return true;
             } else {
                 try {
-                    log.atInfo().setMessage("{}").addArgument(() ->
-                        "Waiting " + sleepAmount + "ms because the last test for metrics from " + metricName +
-                        " on " + matchingMetrics.get() + " did not satisfy the predicate").log();
+                    log.atInfo()
+                        .setMessage("Waiting {}ms because the last test for metrics from {} on {} "
+                            + "did not satisfy the predicate")
+                        .addArgument(sleepAmount)
+                        .addArgument(metricName)
+                        .addArgument(matchingMetrics::get)
+                        .log();
                     Thread.sleep(sleepAmount);
                 } catch (InterruptedException e) {
                     Thread.currentThread().interrupt();
diff --git a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java
index 5d158fab5..d94fc2eeb 100644
--- a/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java
+++ b/coreUtilities/src/testFixtures/java/org/opensearch/migrations/tracing/LoggingContextTracer.java
@@ -10,14 +10,14 @@ public class LoggingContextTracer implements IContextTracker {
 
     @Override
     public void onContextCreated(final IScopedInstrumentationAttributes context) {
-        log.atDebug().setMessage(CREATED_MESSAGE).addArgument(context.getActivityName()).log();
+        log.atDebug().setMessage(CREATED_MESSAGE).addArgument(context::getActivityName).log();
     }
 
     @Override
     public void onContextClosed(IScopedInstrumentationAttributes context) {
         log.atDebug()
             .setMessage(CLOSED_MESSAGE)
-            .addArgument(context.getActivityName())
+            .addArgument(context::getActivityName)
             .addArgument(context::getPopulatedSpanAttributes)
             .log();
     }
diff --git a/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java b/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java
index 28004107a..f1c74b996 100644
--- a/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java
+++ b/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java
@@ -110,13 +110,12 @@ private boolean applyTransformation(final ObjectNode indexJson) {
         var index = mock(Index.class);
         Mockito.when(index.getRawJson()).thenReturn(indexJson);
 
-        log.atInfo().setMessage("Original\n{}").addArgument(indexJson.toPrettyString()).log();
+        log.atInfo().setMessage("Original\n{}").addArgument(() -> indexJson.toPrettyString()).log();
         var wasChanged = transformer.applyTransformation(index);
-        log.atInfo()
-            .setMessage("After{}\n{}")
+        log.atInfo().setMessage("After{}\n{}")
             .addArgument(wasChanged ? " *Changed* " : "")
-            .addArgument(indexJson.toPrettyString())
+            .addArgument(() -> indexJson.toPrettyString())
             .log();
         return wasChanged;
     }
 
diff --git a/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/PredicateLoader.java b/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/PredicateLoader.java
index 0b8fa3866..31f10a674 100644
--- a/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/PredicateLoader.java
+++ b/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/PredicateLoader.java
@@ -29,15 +29,13 @@ public PredicateLoader() {
         );
         var inProgressProviders = new ArrayList();
         for (var provider : predicateProviders) {
-            log.info("Adding IJsonPredicateProvider: " + provider);
+            log.atInfo().setMessage("Adding IJsonPredicateProvider: {}").addArgument(provider).log();
             inProgressProviders.add(provider);
         }
         providers = Collections.unmodifiableList(inProgressProviders);
         log.atInfo()
-            .setMessage(
-                () -> "IJsonPredicateProvider loaded: "
-                    + providers.stream().map(p -> p.getClass().toString()).collect(Collectors.joining("; "))
-            )
+            .setMessage("IJsonPredicateProvider loaded: {}")
+            .addArgument(() -> providers.stream().map(p -> p.getClass().toString()).collect(Collectors.joining("; ")))
             .log();
     }
 
@@ -81,9 +79,9 @@ private IJsonPredicate configurePredicateFromConfig(Map c) {
             if (predicateName.equals(key)) {
                 var configuration = c.get(key);
                 log.atInfo()
-                    .setMessage(
-                        () -> "Creating a Predicate through provider=" + p + " with configuration=" + configuration
-                    )
+                    .setMessage("Creating a Predicate through provider={} with configuration={}")
+                    .addArgument(p)
+                    .addArgument(configuration)
                     .log();
                 return p.createPredicate(configuration);
             }
diff --git a/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/TransformationLoader.java b/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/TransformationLoader.java
index ffcd6b767..136815c7b 100644
--- a/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/TransformationLoader.java
+++ b/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/main/java/org/opensearch/migrations/transform/TransformationLoader.java
@@ -36,11 +36,9 @@ public TransformationLoader() {
             inProgressProviders.add(provider);
         }
         providers = Collections.unmodifiableList(inProgressProviders);
-        log.atInfo()
-            .setMessage(
-                () -> "IJsonTransformerProviders loaded: "
-                    + providers.stream().map(p -> p.getClass().toString()).collect(Collectors.joining("; "))
-            )
+        log.atInfo().setMessage("IJsonTransformerProviders loaded: {}")
+            .addArgument(() -> providers.stream().map(p -> p.getClass().toString())
+                .collect(Collectors.joining("; ")))
             .log();
     }
 
@@ -86,10 +84,9 @@ private IJsonTransformer configureTransformerFromConfig(Map c) {
             var transformerName = p.getName();
             if (transformerName.equals(key)) {
                 var configuration = c.get(key);
-                log.atInfo()
-                    .setMessage(
-                        () -> "Creating a transformer through provider=" + p + " with configuration=" + configuration
-                    )
+                log.atInfo().setMessage("Creating a transformer through provider={} with configuration={}")
+                    .addArgument(p)
+                    .addArgument(configuration)
                     .log();
                 return p.createTransformer(configuration);
             }
diff --git a/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/test/java/org/opensearch/migrations/transform/replay/JsonTransformerTest.java b/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/test/java/org/opensearch/migrations/transform/replay/JsonTransformerTest.java
index 6bb93b1bf..3434c31e8 100644
--- a/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/test/java/org/opensearch/migrations/transform/replay/JsonTransformerTest.java
+++ b/transformation/transformationPlugins/jsonMessageTransformers/jsonMessageTransformerLoaders/src/test/java/org/opensearch/migrations/transform/replay/JsonTransformerTest.java
@@ -68,7 +68,7 @@ public void testHttpTransform() throws IOException {
         var transformer = JsonJoltTransformer.newBuilder().addHostSwitchOperation(DUMMY_HOSTNAME_TEST_STRING).build();
         var transformedDocument = transformer.transformJson(documentJson);
         String transformedJsonOutputStr = emitJson(transformedDocument);
-        log.info("transformed json document: " + transformedJsonOutputStr);
+        log.atInfo().setMessage("transformed json document: {}").addArgument(transformedJsonOutputStr).log();
         Assertions.assertTrue(transformedJsonOutputStr.contains(DUMMY_HOSTNAME_TEST_STRING));
     }