import oracledb


class OracleDB:  # wrapper class; name assumed, full definition not shown in the snippet
    def connect(self):  # presumably the method referenced by self.connect() below
        # Session pool: min=0 keeps no warm connection, so after the idle
        # expire_time window the next acquire() pays the full (TLS)
        # connect cost again.
        self.connection = oracledb.create_pool(
            protocol=self.protocol,
            host=self.host,
            port=self.port,
            service_name=self.service_name,
            ssl_server_cert_dn=self.ssl_server_cert_dn,
            user=self.username,
            password=self.password,
            min=0,
            max=50,
            expire_time=60,
        )
        print("- Connected to the database.")

    def _get_connection(self):
        # Lazily create the pool, then borrow one connection from it.
        if not self.connection:
            self.connect()
        return self.connection.acquire()

    def executemany_update(self, sql, params_list, label=None):
        if not params_list:
            return 0
        label = label or 'executemany'
        with self._get_connection() as conn:
            with conn.cursor() as cur:
                print(f"[SQL] {label}: executing {len(params_list)} records…")
                cur.executemany(sql, params_list)
            conn.commit()
            print(f"[SQL] {label}: completed and COMMIT applied.")
            return len(params_list)
class ReviewService:  # name assumed; the owning class is not shown in the snippet
    # self.oracle2 is an instance of the wrapper class above.
    def update_confidence_bulk_for_reviewer(self, items, rev_by, rev_at):
        sql = """
            UPDATE SCHEMA.TABLE
               SET UPDATED_CONFIDENCE = :confidence,
                   REVIEW_STATUS = :review_status,
                   REV_COMMENT = :rev_comment,
                   REV_BY = :rev_by,
                   REV_AT = :rev_at
             WHERE ID = :row_id
        """
        params = [{
            'row_id': it['row_id'],
            'confidence': it.get('confidence', '1 - High'),
            'review_status': it.get('review_status', 'Approved'),
            'rev_comment': it.get('rev_comment', ''),
            'rev_by': rev_by,
            'rev_at': rev_at,
        } for it in items]
        batch_size = 1000
        batches = [params[i:i + batch_size] for i in range(0, len(params), batch_size)]
        print(f"- Bulk update prepared. batch_size: {batch_size}, "
              f"total batches: {len(batches)}, total rows: {len(params)}")
        for batch_index, batch in enumerate(batches, start=1):
            try:
                print("batch_index, batch:", batch_index, batch)
                self.oracle2.executemany_update(sql, batch, label='bulk-update-confidence')
                # Closing the pool after every batch forces a full
                # reconnect before the next batch can run.
                self.oracle2.close_connection()
                print(f"Batch {batch_index}/{len(batches)}: successfully updated {len(batch)} rows.")
            except Exception as e:
                print(f"Batch {batch_index}/{len(batches)}: error updating rows: {e}")
                raise
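The loop above acquires a connection, commits, and then closes the pool for every batch. A minimal diagnostic sketch (the timed helper is hypothetical, standard library only) for timing each phase of one batch, to see whether acquire(), executemany(), commit(), or the pool close/reopen dominates:

import time
from contextlib import contextmanager

@contextmanager
def timed(phase):
    # Print how long the wrapped block took; purely diagnostic.
    t0 = time.perf_counter()
    yield
    print(f"{phase}: {time.perf_counter() - t0:.3f}s")

# Example usage, wrapping the phases of a single batch:
# with timed("acquire"):
#     conn = pool.acquire()
# with timed("executemany"):
#     with conn.cursor() as cur:
#         cur.executemany(sql, batch)
# with timed("commit"):
#     conn.commit()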
Hello, I am using a connection pool. The bulk insert/update takes minutes to run even for 200 records; how do I make it efficient enough to run within seconds?

Update: it is a simple UPDATE query, with the parameters coming from the frontend (Flask). items is the data we get from Flask: items = data.get("items", []).
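If the per-batch pool close/reconnect turns out to be the cost, one common pattern is to borrow a single pooled connection for the whole job and commit once at the end. A minimal sketch (run_bulk_update is a hypothetical helper; it assumes the pool built by create_pool above and the sql/params from the question):

import time

def run_bulk_update(pool, sql, params, batch_size=1000):
    # Borrow ONE pooled connection for every batch; do not close the
    # pool between batches, and commit once at the end.
    t0 = time.perf_counter()
    with pool.acquire() as conn:
        with conn.cursor() as cur:
            for i in range(0, len(params), batch_size):
                cur.executemany(sql, params[i:i + batch_size])
        conn.commit()  # a single round trip for the whole job
    print(f"Updated {len(params)} rows in {time.perf_counter() - t0:.2f} s.")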
Does self.connection = … belong to a def connect():? And when you say 200 records, do you mean one call to executemany_update() with a params_list of 200 sets of values? What is the SQL query run by cursor()?

executemany(), but the majority of the time is spent in this black box, that's it? And the time to run is proportional to the number of items passed? (Here we have 40 s / 200 items = 200 ms per UPDATE.) Apart from an authentication problem (with each UPDATE trying to reconnect… but this isn't what the pool promises), this could be a normal timing for UPDATEs on your database (triggers, or simply a wrecked index). To split the problem, can you reproduce it by feeding SQL*Plus or similar with 100 UPDATEs as a flat SQL script equivalent to your Python one?
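To make that experiment concrete, here is one way to emit such a flat script (a sketch only: it reuses the params list from the question, assumes string-typed values, and inlines them as literals, so a real REV_AT timestamp would need TO_DATE; never build production SQL this way):

# Write 100 standalone UPDATEs plus a COMMIT, then run:
#   sqlplus user/password@service @repro.sql
with open("repro.sql", "w") as f:
    f.write("SET TIMING ON\n")
    for p in params[:100]:
        f.write(
            "UPDATE SCHEMA.TABLE SET UPDATED_CONFIDENCE = '{confidence}', "
            "REVIEW_STATUS = '{review_status}', REV_COMMENT = '{rev_comment}', "
            "REV_BY = '{rev_by}', REV_AT = '{rev_at}' "
            "WHERE ID = {row_id};\n".format(**p)
        )
    f.write("COMMIT;\n")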