I am researching high-performance logging in Python and have so far been disappointed by the performance of the standard logging module - but there seem to be no alternatives. Below is a piece of code that performance-tests four different ways of logging:
import logging
import timeit
import time
import datetime
from logutils.queue import QueueListener, QueueHandler
import Queue
import threading
tmpq = Queue.Queue()
def std_manual_threading():
    start = datetime.datetime.now()
    logger = logging.getLogger()
    hdlr = logging.FileHandler('std_manual.out', 'w')
    logger.addHandler(hdlr)
    logger.setLevel(logging.DEBUG)
    def logger_thread():
        while True:
            item = tmpq.get()  # block until an item arrives; None is the shutdown sentinel
            if item is None:
                break
            logging.info(item)
    lt = threading.Thread(target=logger_thread)
    lt.start()
    for i in range(100000):
        tmpq.put("msg:%d" % i)
    tmpq.put(None)
    lt.join()
    print datetime.datetime.now() - start
def nonstd_manual_threading():
    start = datetime.datetime.now()
    def logger_thread(f):
        while True:
            item = tmpq.get()  # block until an item arrives; None is the shutdown sentinel
            if item is None:
                break
            f.write(item + "\n")
    f = open('manual.out', 'w')
    lt = threading.Thread(target=logger_thread, args=(f,))
    lt.start()
    for i in range(100000):
        tmpq.put("msg:%d" % i)
    tmpq.put(None)
    lt.join()
    f.close()
    print datetime.datetime.now() - start
def std_logging_queue_handler():
    start = datetime.datetime.now()
    q = Queue.Queue(-1)
    hdlr = logging.FileHandler('qtest.out', 'w')
    ql = QueueListener(q, hdlr)
    # Attach a QueueHandler to the root logger; the listener thread pulls
    # records off the queue and feeds them to the file handler.
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    qh = QueueHandler(q)
    root.addHandler(qh)
    ql.start()
    for i in range(100000):
        logging.info("msg:%d" % i)
    ql.stop()
    print datetime.datetime.now() - start
def std_logging_single_thread():
    start = datetime.datetime.now()
    logger = logging.getLogger()
    hdlr = logging.FileHandler('test.out', 'w')
    logger.addHandler(hdlr)
    logger.setLevel(logging.DEBUG)
    for i in range(100000):
        logging.info("msg:%d" % i)
    print datetime.datetime.now() - start
if __name__ == "__main__":
    """
    Conclusion: std logging is about 3x slower, so writing 100K lines takes
    ~1 s as a simple file write and ~3 s through std logging. Adding a
    logging thread brings overhead that pushes it to ~4 s, and using
    QueueListener/QueueHandler pushes it to ~5 s even with the thread-sleep
    tweak (probably because log records still have to be created and pushed
    through the queue).
    """
    print "Testing"
    #std_logging_single_thread()  # 3.4
    std_logging_queue_handler()   # 7, 6, 7 (5 seconds with the sleep tweak)
    #nonstd_manual_threading()    # 1.08
    #std_manual_threading()       # 4.3
- The nonstd_manual_threading option works best, since there is no logging-module overhead, but you obviously miss out on features such as formatters, filters, and the nice interface.
- std_logging in a single thread is the next best thing, but it is still about 3 times slower than nonstd_manual_threading.
- The std_manual_threading option dumps messages into a thread-safe queue and uses the standard logging module in a separate thread. That comes out about 25% slower than option 2, probably due to context-switching costs.
- Finally, the option using logutils's QueueHandler comes out to be the most expensive. I tweaked logutils/queue.py's _monitor method to sleep for 10 ms after processing 500 messages, as long as there are fewer than 100K messages in the queue; that brings the runtime down from 7 seconds to 5 seconds, probably by avoiding context-switching costs. A sketch of the idea follows this list.
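To make the tweak concrete, here is a minimal sketch of the batching-and-sleep idea applied to a generic queue-draining thread. This is not the actual logutils _monitor code; the drain function is hypothetical, and the 500-message batch, 10 ms sleep, and 100K backlog cap just mirror the numbers above.

import Queue
import threading
import time

def drain(q, emit, batch=500, backlog_cap=100000, sleep_s=0.01):
    # Handle up to `batch` items, then sleep briefly so the producer can
    # refill the queue; skip the sleep once the backlog gets large.
    while True:
        for _ in range(batch):
            item = q.get()  # blocking get; None is the shutdown sentinel
            if item is None:
                return
            emit(item)
        if q.qsize() < backlog_cap:  # qsize() is approximate, good enough here
            time.sleep(sleep_s)

# Usage, mirroring nonstd_manual_threading but with the sleep tweak:
q = Queue.Queue()
f = open('sketch.out', 'w')
t = threading.Thread(target=drain, args=(q, lambda s: f.write(s + "\n")))
t.start()
for i in range(100000):
    q.put("msg:%d" % i)
q.put(None)
t.join()
f.close()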
My question is: why is there so much performance overhead with the logging module, and are there any alternatives? For a performance-sensitive app, does it even make sense to use the logging module?
P.S.: I have profiled the different scenarios, and LogRecord creation appears to be expensive.
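For reference, the stock module has a few module-level switches that reduce the work done per LogRecord (the logging HOWTO's optimization section lists them); note that _srcfile is a private attribute, so that line leans on an implementation detail:

import logging

# Skip the frame walk that fills in pathname/filename/lineno on each record
# (private attribute; mentioned in the logging HOWTO's optimization notes).
logging._srcfile = None
# Don't capture thread name/id, process id, or multiprocessing process name.
logging.logThreads = 0
logging.logProcesses = 0
logging.logMultiprocessing = 0

These trim what each LogRecord has to collect, but they obviously don't eliminate record creation itself.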