Understanding Race Conditions, Deadlocks, Resource Contention, and Data Consistency in Multithreaded Python
This article explains common multithreading problems such as race conditions, deadlocks, resource contention, and data consistency issues, and provides Python code examples that demonstrate synchronization techniques, lock ordering, connection pooling, thread pools, and transaction management to ensure correct and stable concurrent execution.
Race Condition
A race condition occurs when multiple threads access and modify shared data without proper synchronization, leading to inconsistent results. The example shows a Counter class protected by threading.Lock to ensure atomic increments.
import threading


class Counter:
    """A counter whose increments are made atomic with a threading.Lock.

    Without the lock, `self.count += 1` is a read-modify-write sequence and
    concurrent threads can lose updates (the race condition this example
    demonstrates how to avoid).
    """

    def __init__(self):
        self.count = 0
        self.lock = threading.Lock()

    def increment(self):
        """Atomically add 1 to the counter."""
        with self.lock:
            self.count += 1

    def get_count(self):
        """Return the current count.

        The lock is taken here too so the read is consistent with the same
        synchronization discipline the writers use (the original read the
        attribute unlocked, which works in CPython but is inconsistent with
        the example's own point).
        """
        with self.lock:
            return self.count


# Test: 100 threads each increment once; the final count must be exactly 100.
counter = Counter()
threads = []
for _ in range(100):
    t = threading.Thread(target=counter.increment)
    threads.append(t)
    t.start()
for t in threads:
    t.join()
print(f"Final count: {counter.get_count()}")

Deadlock
A deadlock happens when two or more threads wait indefinitely for each other to release resources. The first code example creates two locks and acquires them in opposite order, causing a deadlock. The second version uses timed lock acquisition to avoid the deadlock.
import threading
def method1(lock1, lock2):
    """Acquire lock1, then lock2 — the opposite order from method2.

    Deadlock-prone by design: if another thread holds lock2 and is waiting
    for lock1, both threads block forever.
    """
    lock1.acquire()
    try:
        print("Thread 1: Acquired lock1")
        lock2.acquire()
        try:
            print("Thread 1: Acquired lock2")
        finally:
            lock2.release()
    finally:
        lock1.release()


def method2(lock1, lock2):
    """Acquire lock2, then lock1 — the opposite order from method1."""
    lock2.acquire()
    try:
        print("Thread 2: Acquired lock2")
        lock1.acquire()
        try:
            print("Thread 2: Acquired lock1")
        finally:
            lock1.release()
    finally:
        lock2.release()
# WARNING: this driver is expected to deadlock — method1 and method2
# acquire the two locks in opposite orders, so each thread can end up
# waiting forever for the lock the other one holds.
lock1 = threading.Lock()
lock2 = threading.Lock()
t1 = threading.Thread(target=method1, args=(lock1, lock2))
t2 = threading.Thread(target=method2, args=(lock1, lock2))
for t in (t1, t2):
    t.start()
for t in (t1, t2):
    t.join()
# Timeout version
import threading
def method1(lock1, lock2):
    """Acquire lock1 then lock2, each with a 1-second timeout.

    If either acquisition times out the function simply gives up, so two
    threads acquiring in opposite orders cannot block each other forever.
    """
    if not lock1.acquire(timeout=1):
        return
    try:
        print("Thread 1: Acquired lock1")
        if lock2.acquire(timeout=1):
            try:
                print("Thread 1: Acquired lock2")
            finally:
                lock2.release()
    finally:
        lock1.release()


def method2(lock1, lock2):
    """Acquire lock2 then lock1, each with a 1-second timeout."""
    if not lock2.acquire(timeout=1):
        return
    try:
        print("Thread 2: Acquired lock2")
        if lock1.acquire(timeout=1):
            try:
                print("Thread 2: Acquired lock1")
            finally:
                lock1.release()
    finally:
        lock2.release()
# Driver for the timeout version. Unlike the first demo this cannot hang:
# a thread that fails to get its second lock within 1 s releases the first
# and returns. (Fixes the garbled original line `t1.start() 2.start()`.)
lock1 = threading.Lock()
lock2 = threading.Lock()
t1 = threading.Thread(target=method1, args=(lock1, lock2))
t2 = threading.Thread(target=method2, args=(lock1, lock2))
t1.start()
t2.start()
t1.join()
t2.join()

Resource Contention
When many threads compete for limited resources such as database connections, performance degrades. The example demonstrates a simple connection pool and a thread pool to limit concurrent access, as well as a token‑bucket‑style rate limiter.
import sqlite3
import concurrent.futures
import threading
import time

# A fixed-size pool of SQLite connections guarded by a lock.
connection_pool = []
pool_size = 5
# BUG FIX: pool_lock was used by the pool functions but never defined,
# which made every pool operation raise NameError.
pool_lock = threading.Lock()


def init_connection_pool():
    """Create pool_size connections and prepare the schema on each.

    check_same_thread=False is required because the connections are created
    here (main thread) but used by worker threads; the sqlite3 default would
    raise ProgrammingError on first use from another thread.
    """
    for _ in range(pool_size):
        conn = sqlite3.connect(':memory:', check_same_thread=False)
        # Each :memory: connection is its own database, so create the table
        # on every pooled connection up front.
        conn.execute("CREATE TABLE IF NOT EXISTS test (id INTEGER PRIMARY KEY, value TEXT)")
        conn.commit()
        connection_pool.append(conn)


def get_connection_from_pool():
    """Pop a connection from the pool, or return None if it is empty."""
    with pool_lock:
        if connection_pool:
            return connection_pool.pop()
        return None


def release_connection_to_pool(conn):
    """Return a connection to the pool for reuse."""
    with pool_lock:
        connection_pool.append(conn)


def process_request(request_id):
    """Insert one row using a pooled connection.

    With 10 workers competing for 5 connections the pool can be empty; the
    original silently dropped such requests. We instead back off briefly and
    retry until a connection frees up, so every request is processed.
    """
    conn = get_connection_from_pool()
    while conn is None:
        time.sleep(0.001)  # pool exhausted — wait for a release
        conn = get_connection_from_pool()
    try:
        cursor = conn.cursor()
        cursor.execute("CREATE TABLE IF NOT EXISTS test (id INTEGER PRIMARY KEY, value TEXT)")
        cursor.execute("INSERT INTO test (value) VALUES (?)", (f"Request {request_id}",))
        conn.commit()
    except Exception as e:
        print(f"Error: {e}")
    finally:
        release_connection_to_pool(conn)


init_connection_pool()
executor = concurrent.futures.ThreadPoolExecutor(max_workers=10)
try:
    for request in range(100):
        executor.submit(process_request, request)
finally:
    # Wait for all submitted requests so the results below are complete.
    executor.shutdown(wait=True)
executor.shutdown(wait=True)

Concurrent Data Consistency
Ensuring data remains consistent across threads can be achieved with database transactions, optimistic or pessimistic locking. The first snippet uses SQLite transactions to serialize writes, while the second demonstrates an optimistic‑lock pattern with a version column.
import sqlite3
import threading

# BUG FIX: check_same_thread=False — the connection is created here in the
# main thread but used from worker threads; the sqlite3 default raises
# ProgrammingError in that case.
conn = sqlite3.connect(':memory:', check_same_thread=False)
# The sqlite3 module's implicit transaction handling is not safe when
# several threads interleave statements on one connection, so a lock
# serializes whole transactions (which is exactly what this example is
# meant to demonstrate).
db_lock = threading.Lock()
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS test (id INTEGER PRIMARY KEY, value TEXT)")
conn.commit()


def update_value(value):
    """Insert one row inside its own committed transaction.

    `with conn:` commits on success and rolls back on exception; db_lock
    ensures transactions from different threads do not interleave.
    """
    with db_lock:
        with conn:
            cur = conn.cursor()
            cur.execute("INSERT INTO test (value) VALUES (?)", (value,))


threads = []
for i in range(100):
    t = threading.Thread(target=update_value, args=(f"Value {i}",))
    threads.append(t)
    t.start()
for t in threads:
    t.join()
cursor.execute("SELECT * FROM test")
print(cursor.fetchall())
# Optimistic lock example
import sqlite3
import threading

# BUG FIX: check_same_thread=False — the connection is shared with worker
# threads; the sqlite3 default would raise ProgrammingError.
conn = sqlite3.connect(':memory:', check_same_thread=False)
# Serializes the commit machinery of the shared connection; the *optimistic*
# concurrency control is still done by the version check in the UPDATE.
txn_lock = threading.Lock()
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS test (id INTEGER PRIMARY KEY, value TEXT, version INTEGER DEFAULT 0)")
conn.commit()


def update_value_optimistic(id, value, expected_version):
    """Compare-and-swap style update.

    The UPDATE only matches when the stored version still equals
    expected_version; a zero rowcount means another thread won the race,
    and the caller is expected to re-read and retry.

    Raises:
        Exception: if the row's version no longer matches (lost the race).
    """
    with txn_lock:
        with conn:
            cur = conn.cursor()
            cur.execute("UPDATE test SET value = ?, version = version + 1 WHERE id = ? AND version = ?", (value, id, expected_version))
            if cur.rowcount == 0:
                raise Exception("Optimistic lock failed")


def worker(id, value):
    """Read the current version, attempt the update, retry on conflict."""
    while True:
        # BUG FIX: each thread gets its own cursor — sqlite3 raises
        # "Recursive use of cursors" when threads share the module-level one.
        cur = conn.cursor()
        cur.execute("SELECT value, version FROM test WHERE id = ?", (id,))
        row = cur.fetchone()
        if row:
            _, current_version = row
            try:
                update_value_optimistic(id, value, current_version)
                break
            except Exception as e:
                print(f"Worker {id}: {e}")


cursor.execute("INSERT INTO test (id, value) VALUES (?, ?)", (1, "Initial Value"))
conn.commit()
threads = []
for i in range(10):
    t = threading.Thread(target=worker, args=(1, f"Value {i}"))
    threads.append(t)
    t.start()
for t in threads:
    t.join()
cursor.execute("SELECT * FROM test")
print(cursor.fetchone())

Conclusion
By applying proper synchronization primitives, lock ordering, connection pooling, thread pools, and transaction management, developers can effectively mitigate race conditions, deadlocks, resource contention, and data consistency problems in multithreaded Python applications.
Test Development Learning Exchange
How this landed with the community
Was this worth your time?
0 Comments
Thoughtful readers leave field notes, pushback, and hard-won operational detail here.