Reduce logging level from info to debug (#258)
* Reduce logging level from info to debug

* fixed GrGit
musketyr authored Jul 18, 2024
1 parent d7fcfc0 commit f7e0ef9
Showing 10 changed files with 32 additions and 23 deletions.
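These log statements are now emitted at DEBUG, so they disappear under the usual INFO default. A minimal sketch of how a consuming application could switch them back on at runtime, assuming Logback is the SLF4J backend; the class name and logger name below are illustrative and not taken from this commit:

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import org.slf4j.LoggerFactory;

public class EnableKinesisDebugLogging {

    public static void main(String[] args) {
        // The logger name is an assumption: point it at the package that owns the Kinesis worker classes.
        Logger workerLogger = (Logger) LoggerFactory.getLogger("com.agorapulse.micronaut.aws.kinesis");
        workerLogger.setLevel(Level.DEBUG);
    }
}

The same effect is usually configured declaratively in the logging backend's configuration file rather than in code.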
8 changes: 8 additions & 0 deletions docs/guide/guide.gradle
@@ -15,6 +15,14 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
buildscript {
repositories {
mavenCentral()
}
dependencies {
classpath("org.ajoberstar.grgit:grgit-core:$grgitVersion")
}
}
plugins {
id 'org.kordamp.gradle.guide'
id 'org.ajoberstar.git-publish'
1 change: 1 addition & 0 deletions gradle.properties
@@ -33,6 +33,7 @@ closureSupportVersion=0.6.3
mockitoVersion=2.23.4
kotlinVersion=1.9.21
kspVersion=1.9.21-1.0.15
grgitVersion=5.0.0

# other versions creates conflicts in Groovydoc
groovyVersion = 4.0.16

@@ -99,7 +99,7 @@ public void start() {
);

try {
LOGGER.info("Starting Kinesis worker for {}", configuration.getStream());
LOGGER.debug("Starting Kinesis worker for {}", configuration.getStream());
executorService.submit(scheduler);
} catch (Exception t) {
LOGGER.error("Caught throwable while processing Kinesis data.", t);

@@ -64,13 +64,13 @@ static ShardRecordProcessor create(BiConsumer<String, KinesisClientRecord> consu

@Override
public void leaseLost(LeaseLostInput leaseLostInput) {
LOGGER.info("[{}] Lost lease, so terminating.", shardId);
LOGGER.debug("[{}] Lost lease, so terminating.", shardId);
}

@Override
public void shardEnded(ShardEndedInput shardEndedInput) {
try {
LOGGER.info("[{}] Reached shard end checkpointing.", shardId);
LOGGER.debug("[{}] Reached shard end checkpointing.", shardId);
shardEndedInput.checkpointer().checkpoint();
} catch (ShutdownException | InvalidStateException e) {
LOGGER.error("Exception while checkpointing at shard end. Giving up.", e);
@@ -80,7 +80,7 @@ public void shardEnded(ShardEndedInput shardEndedInput) {
@Override
public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) {
try {
LOGGER.info("[{}] Scheduler is shutting down, checkpointing.", shardId);
LOGGER.debug("[{}] Scheduler is shutting down, checkpointing.", shardId);
shutdownRequestedInput.checkpointer().checkpoint();
} catch (ShutdownException | InvalidStateException e) {
LOGGER.error("Exception while checkpointing at requested shutdown. Giving up.", e);
@@ -91,12 +91,12 @@ public void initialize(InitializationInput initializationInput) {
this.shardId = initializationInput.shardId();

Thread thread = Thread.currentThread();
LOGGER.info("[{}] Initializing: thread = {}: {}, sequence = {}", shardId, thread.getId(), thread.getName(), initializationInput.extendedSequenceNumber());
LOGGER.debug("[{}] Initializing: thread = {}: {}, sequence = {}", shardId, thread.getId(), thread.getName(), initializationInput.extendedSequenceNumber());
}

public void processRecords(ProcessRecordsInput input) {
Thread thread = Thread.currentThread();
LOGGER.info("[{}] Processing: {} records, thread = {}: {}", shardId, input.records().size(), thread.getId(), thread.getName());
LOGGER.debug("[{}] Processing: {} records, thread = {}: {}", shardId, input.records().size(), thread.getId(), thread.getName());

// Process records and perform all exception handling.
processRecordsWithRetries(input.records());
@@ -135,7 +135,7 @@ private void processRecordsWithRetries(List<KinesisClientRecord> records) {
try {
Thread.sleep(BACKOFF_TIME_IN_MILLIS);
} catch (InterruptedException e) {
LOGGER.info("[{}] Interrupted sleep", shardId, e);
LOGGER.debug("[{}] Interrupted sleep", shardId, e);
}
}

@@ -165,22 +165,22 @@ private void processSingleRecord(KinesisClientRecord record) {
* @param checkpointer
*/
protected void checkpoint(RecordProcessorCheckpointer checkpointer) {
LOGGER.info("[{}] Checkpointing shard", shardId);
LOGGER.debug("[{}] Checkpointing shard", shardId);
for (int i = 0; i < NUM_RETRIES; i++) {
try {
checkpointer.checkpoint();
break;
} catch (ShutdownException se) {
// Ignore checkpoint if the processor instance has been shutdown (fail over).
LOGGER.info("[" + shardId + "] Caught shutdown exception, skipping checkpoint.", se);
LOGGER.debug("[" + shardId + "] Caught shutdown exception, skipping checkpoint.", se);
break;
} catch (ThrottlingException e) {
// Backoff and re-attempt checkpoint upon transient failures
if (i >= (NUM_RETRIES - 1)) {
LOGGER.error("[" + shardId + "] Checkpoint failed after " + (i + 1) + " attempts.", e);
break;
} else {
LOGGER.info("[" + shardId + "] Transient issue when checkpointing - attempt " + (i + 1) + " of $NUM_RETRIES", e);
LOGGER.debug("[" + shardId + "] Transient issue when checkpointing - attempt " + (i + 1) + " of $NUM_RETRIES", e);
}
} catch (InvalidStateException e) {
// This indicates an issue with the DynamoDB table (check for table, provisioned IOPS).
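Most of the converted calls use SLF4J's parameterized form, which defers message formatting until the DEBUG check passes; the checkpoint messages above still concatenate the shard id eagerly. A small sketch of the equivalent parameterized call, assuming SLF4J 1.6 or newer; the surrounding class and method are illustrative, not part of this commit:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class CheckpointLogging {

    private static final Logger LOGGER = LoggerFactory.getLogger(CheckpointLogging.class);

    void logSkippedCheckpoint(String shardId, Exception cause) {
        // The message is only built when DEBUG is enabled, and a trailing exception without
        // a matching {} placeholder is still logged with its stack trace.
        LOGGER.debug("[{}] Caught shutdown exception, skipping checkpoint.", shardId, cause);
    }
}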

@@ -122,7 +122,7 @@ public void accept(String s, KinesisClientRecord record) {
if (event instanceof Event && StringUtils.isNotEmpty(consumerFilterKey)) {
String eventKey = ((Event) event).getConsumerFilterKey();
if (!consumerFilterKey.equals(eventKey)) {
LOGGER.info("Ignoring event because expected consumer filter key {} is not equal to the event's filter key {}: {}", consumerFilterKey, eventKey, s);
LOGGER.debug("Ignoring event because expected consumer filter key {} is not equal to the event's filter key {}: {}", consumerFilterKey, eventKey, s);
return;
}
}
@@ -209,7 +209,7 @@ public void process(BeanDefinition<?> beanDefinition, ExecutableMethod<?, ?> met
configurationName, key -> {
KinesisWorker w = kinesisWorkerFactory.create(getKinesisConfiguration(key));

LOGGER.info("Kinesis worker for configuration {} created", key);
LOGGER.debug("Kinesis worker for configuration {} created", key);

w.start();

@@ -219,7 +219,7 @@

worker.addConsumer(consumer);

LOGGER.info("Kinesis listener for method {} declared in {} registered", method, beanDefinition.getBeanType());
LOGGER.debug("Kinesis listener for method {} declared in {} registered", method, beanDefinition.getBeanType());
}

@Override

@@ -49,7 +49,7 @@ public String getRegion(){
}
} catch (Exception e) {
// Ignore any exceptions and move onto the next provider
LOG.info("Unable to load region from " + provider.toString() + ": " + e.getMessage());
LOG.debug("Unable to load region from " + provider.toString() + ": " + e.getMessage());
}
}
return null;

@@ -289,11 +289,11 @@ class DefaultDynamoDBService<TItemClass> implements DynamoDBService<TItemClass>
globalSecondaryIndex.provisionedThroughput = new ProvisionedThroughput()
.withReadCapacityUnits(DEFAULT_READ_CAPACTIY)
.withWriteCapacityUnits(DEFAULT_WRITE_CAPACITY)
log.info("Creating DynamoDB GSI: ${globalSecondaryIndex}")
log.debug("Creating DynamoDB GSI: ${globalSecondaryIndex}")
}
}

log.info("Creating DynamoDB table: ${createTableRequest}")
log.debug("Creating DynamoDB table: ${createTableRequest}")

return client.createTable(createTableRequest)
}

@@ -81,7 +81,7 @@ public void start() {
builder.workerStateChangeListener(s -> applicationEventPublisher.publishEvent(new WorkerStateEvent(s, configuration.getStreamName())));

try {
LOGGER.info("Starting Kinesis worker for {}", configuration.getStreamName());
LOGGER.debug("Starting Kinesis worker for {}", configuration.getStreamName());
worker = builder.build();
executorService.execute(worker);
} catch (Exception t) {

@@ -145,22 +145,22 @@ class DefaultRecordProcessor implements IRecordProcessor {
* @param checkpointer
*/
protected void checkpoint(IRecordProcessorCheckpointer checkpointer) {
log.info "[${shardId}] Checkpointing shard"
log.debug "[${shardId}] Checkpointing shard"
for (int i = 0; i < NUM_RETRIES; i++) {
try {
checkpointer.checkpoint()
break
} catch (ShutdownException se) {
// Ignore checkpoint if the processor instance has been shutdown (fail over).
log.info "[${shardId}] Caught shutdown exception, skipping checkpoint.", se
log.debug "[${shardId}] Caught shutdown exception, skipping checkpoint.", se
break
} catch (ThrottlingException e) {
// Backoff and re-attempt checkpoint upon transient failures
if (i >= (NUM_RETRIES - 1)) {
log.error "[${shardId}] Checkpoint failed after ${i + 1} attempts.", e
break
} else {
log.info "[${shardId}] Transient issue when checkpointing - attempt ${i + 1} of $NUM_RETRIES", e
log.debug "[${shardId}] Transient issue when checkpointing - attempt ${i + 1} of $NUM_RETRIES", e
}
} catch (InvalidStateException e) {
// This indicates an issue with the DynamoDB table (check for table, provisioned IOPS).

@@ -125,7 +125,7 @@ public void accept(String s, Record record) {
if (event instanceof Event && StringUtils.isNotEmpty(consumerFilterKey)) {
String eventKey = ((Event) event).getConsumerFilterKey();
if (!consumerFilterKey.equals(eventKey)) {
LOGGER.info("Ignoring event because expected consumer filter key {} is not equal to the event's filter key {}: {}", consumerFilterKey, eventKey, s);
LOGGER.debug("Ignoring event because expected consumer filter key {} is not equal to the event's filter key {}: {}", consumerFilterKey, eventKey, s);
return;
}
}
@@ -211,7 +211,7 @@ public void process(BeanDefinition<?> beanDefinition, ExecutableMethod<?, ?> met
configurationName, key -> {
KinesisWorker w = kinesisWorkerFactory.create(getKinesisConfiguration(key));

LOGGER.info("Kinesis worker for configuration {} created", key);
LOGGER.debug("Kinesis worker for configuration {} created", key);

w.start();

@@ -221,7 +221,7 @@

worker.addConsumer(consumer);

LOGGER.info("Kinesis listener for method {} declared in {} registered", method, beanDefinition.getBeanType());
LOGGER.debug("Kinesis listener for method {} declared in {} registered", method, beanDefinition.getBeanType());
}

@Override
