Add service runner finder and complete the task chain

This commit is contained in:
2025-08-11 19:26:49 +03:00
parent 405ba2e95d
commit ca98adc338
18 changed files with 1205 additions and 257 deletions

View File

@@ -68,102 +68,69 @@ def initialize_service():
if __name__ == "__main__":
logger.info("Starting IsBank Email Service")
print(f"Starting Service Mail Reader.")
# Initialize service
runner = initialize_service()
# Configurable parameters
normal_sleep_time = 10 # seconds between normal operations
error_sleep_time = 30 # seconds to wait after an error before retrying
max_consecutive_errors = 5 # maximum number of consecutive errors before longer pause
extended_error_sleep = 120 # seconds to wait after hitting max consecutive errors
normal_sleep_time = 10
error_sleep_time = 30
max_consecutive_errors = 5
extended_error_sleep = 120
consecutive_errors = 0
# Main service loop
while True:
try:
# Main processing
print("Fetching and setting mails...")
runner.fetch_and_set_mails()
# Reset error counter on success
if consecutive_errors > 0:
logger.info(f"Service recovered after {consecutive_errors} consecutive errors")
consecutive_errors = 0
# Normal operation sleep
sleep(normal_sleep_time)
except MailReaderService.REDIS_EXCEPTIONS as e:
# Redis-specific errors
consecutive_errors += 1
logger.error(f"Redis error (attempt {consecutive_errors}): {str(e)}")
# Use centralized reconnection handler from RedisHandler
redis_handler, need_extended_sleep = MailReaderService.handle_reconnection(
consecutive_errors=consecutive_errors, max_consecutive_errors=max_consecutive_errors
)
if redis_handler:
# Update runner's redis handler with the new instance
runner.redis_handler = redis_handler
runner.redis_connected = False # Will trigger reconnection on next cycle
# Sleep based on error count
runner.redis_connected = False
if need_extended_sleep:
sleep(extended_error_sleep)
else:
sleep(error_sleep_time)
except socket.error as e:
# Email connection errors
consecutive_errors += 1
logger.error(f"Email connection error (attempt {consecutive_errors}): {str(e)}")
# Try to re-establish email connection
try:
logger.info("Attempting to re-establish email connection...")
# Create new email service directly
email_service = EmailReaderService(IsBankConfig())
email_service.login_and_connect()
# Create new runner with existing Redis handler and new email service
redis_handler = runner.redis_handler # Preserve existing Redis handler
redis_handler = runner.redis_handler
runner = EmailServiceRunner(redis_handler=redis_handler, email_service=email_service)
logger.info("Successfully re-established email connection")
except Exception as email_retry_error:
logger.error(f"Failed to re-establish email connection: {str(email_retry_error)}")
# Determine sleep time based on consecutive errors
if consecutive_errors >= max_consecutive_errors:
logger.warning(f"Hit {max_consecutive_errors} consecutive email errors, taking longer pause")
sleep(extended_error_sleep)
else:
sleep(error_sleep_time)
except Exception as e:
# Any other unexpected errors
consecutive_errors += 1
logger.error(f"Unexpected error (attempt {consecutive_errors}): {str(e)}")
# For any other error, try to reinitialize everything after some delay
if consecutive_errors >= max_consecutive_errors:
logger.warning(f"Hit {max_consecutive_errors} consecutive errors, reinitializing service")
try:
# Try to clean up existing connections
try:
runner.drop()
except Exception as cleanup_error:
logger.warning(f"Error during cleanup: {str(cleanup_error)}")
# Reinitialize the service directly
redis_handler = MailReaderService()
email_service = EmailReaderService(IsBankConfig())
email_service.login_and_connect()
runner = EmailServiceRunner(redis_handler=redis_handler, email_service=email_service)
if runner:
logger.info("Successfully reinitialized email service runner")
consecutive_errors = 0 # Reset counter after reinitialization
consecutive_errors = 0
else:
logger.error("Failed to reinitialize email service runner")
except Exception as reinit_error:
@@ -171,6 +138,5 @@ if __name__ == "__main__":
sleep(extended_error_sleep)
else:
# For fewer consecutive errors, just retry the current runner
print(f"Error: {str(e)}")
sleep(error_sleep_time)