I am running a MongoDB instance inside a Docker container using docker-compose. My data is getting deleted once a day, even though I am using named volumes.
Here is my docker-compose.yml file:
version: '3'

volumes:
  production_announcements_postgres: {}
  production_announcements_postgres_backups: {}
  production_users_postgres: {}
  production_users_postgres_backups: {}
  mongo_production_stats: {}
  production_staticfiles: {}
  production_media: {}

services:
  admin:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/admin/compose/production/Dockerfile
    image: ustoz_production_admin
    depends_on:
      - redis
      - announcements_postgres
      - users_postgres
    volumes:
      - production_staticfiles:/app/staticfiles
      - production_media:/app/apps/media
    env_file:
      - ./images/admin/.envs/.production/.admin
      - ./images/admin/.envs/.production/.postgres
    ports:
      - '8000:8000'
    command: /start

  announcements_api:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/announcements/compose/production/api/Dockerfile
    image: production_announcement_api
    depends_on:
      - announcements_postgres
      - users_postgres
      - redis
    env_file:
      - ./images/announcements/.envs/.production/.api
      - ./images/announcements/.envs/.production/.postgres
    ports:
      - '8080:8080'
    command: /start

  users_api: &users_api
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/users/compose/production/api/Dockerfile
    image: ustoz_production_users_api
    depends_on:
      - users_postgres
      - announcements_postgres
      - redis
    volumes:
      - production_media:/app/src/media
    env_file:
      - ./images/users/.envs/.production/.api
      - ./images/users/.envs/.production/.postgres
    ports:
      - '5000:5000'
    command: /start

  statistics_api:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/statistics/compose/production/api/Dockerfile
    image: production_statistics_api
    depends_on:
      - announcements_postgres
      - users_postgres
      - redis
      - statistics_mongo
    env_file:
      - ./images/statistics/.envs/.production/
    ports:
      - '8081:8081'
    command: /start

  announcements_postgres:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/announcements/compose/production/postgres/Dockerfile
    image: production_announcements_postgres
    volumes:
      - production_announcements_postgres:/var/lib/postgresql/data
      - production_announcements_postgres_backups:/backups
    env_file:
      - ./images/announcements/.envs/.production/.postgres

  users_postgres:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/users/compose/production/postgres/Dockerfile
    image: ustoz_production_users_postgres
    volumes:
      - ustoz_production_users_postgres:/var/lib/postgresql/data
      - ustoz_production_users_postgres_backups:/backups
    env_file:
      - ./images/users/.envs/.production/.postgres

  statistics_mongo:
    restart: unless-stopped
    image: mongo:latest
    ports:
      - '27017:27017'
    volumes:
      - mongo_production_stats:/data/db

  redis:
    restart: unless-stopped
    image: docker.io/redis:6

  celeryworker:
    <<: *users_api
    image: production_celeryworker
    depends_on:
      - redis
      - users_postgres
      - announcements_postgres
    ports: []
    command: /start-celeryworker

  nginx:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/nginx/Dockerfile
    image: production_nginx
    ports:
      - 8008:80
    depends_on:
      - admin
      - announcements_api
      - users_api
      - statistics_api
    volumes:
      - production_staticfiles:/usr/share/nginx/staticfiles:ro
      - production_media:/usr/share/nginx/media:ro
Here are the last logs from the statistics_mongo container before the data disappeared:
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I", "c":"COMMAND", "id":20337, "ctx":"conn38","msg":"dropDatabase - starting","attr":{"db":"local"}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I", "c":"COMMAND", "id":20338, "ctx":"conn38","msg":"dropDatabase - dropping collection","attr":{"db":"local","namespace":"local.startup_log"}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I", "c":"STORAGE", "id":22206, "ctx":"conn38","msg":"Deferring table drop for index","attr":{"index":"_id_","namespace":"local.startup_log","uuid":{"uuid":{"$uuid":"22444b18-7149-4555-9472-c6ad651eda76"}},"ident":"index-3-10455057658662013419","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I", "c":"STORAGE", "id":22214, "ctx":"conn38","msg":"Deferring table drop for collection","attr":{"namespace":"local.startup_log","ident":"collection-2-10455057658662013419","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I", "c":"COMMAND", "id":20336, "ctx":"conn38","msg":"dropDatabase","attr":{"db":"local","numCollectionsDropped":1}}
{"t":{"$date":"2025-02-25T08:21:36.220+00:00"},"s":"I", "c":"COMMAND", "id":20337, "ctx":"conn38","msg":"dropDatabase - starting","attr":{"db":"statistics"}}
{"t":{"$date":"2025-02-25T08:21:36.220+00:00"},"s":"I", "c":"COMMAND", "id":20338, "ctx":"conn38","msg":"dropDatabase - dropping collection","attr":{"db":"statistics","namespace":"statistics.AccountViews"}}
{"t":{"$date":"2025-02-25T08:21:36.220+00:00"},"s":"I", "c":"COMMAND", "id":20338, "ctx":"conn38","msg":"dropDatabase - dropping collection","attr":{"db":"statistics","namespace":"statistics.AnnouncementViews"}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I", "c":"STORAGE", "id":22206, "ctx":"conn38","msg":"Deferring table drop for index","attr":{"index":"_id_","namespace":"statistics.AccountViews","uuid":{"uuid":{"$uuid":"105f8210-2b06-4be1-87ad-753523b92119"}},"ident":"index-4-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I", "c":"STORAGE", "id":22214, "ctx":"conn38","msg":"Deferring table drop for collection","attr":{"namespace":"statistics.AccountViews","ident":"collection-3-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I", "c":"STORAGE", "id":22206, "ctx":"conn38","msg":"Deferring table drop for index","attr":{"index":"_id_","namespace":"statistics.AnnouncementViews","uuid":{"uuid":{"$uuid":"b02238d0-121b-4b82-8ec1-8f040e84734b"}},"ident":"index-6-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I", "c":"STORAGE", "id":22214, "ctx":"conn38","msg":"Deferring table drop for collection","attr":{"namespace":"statistics.AnnouncementViews","ident":"collection-5-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I", "c":"COMMAND", "id":20336, "ctx":"conn38","msg":"dropDatabase","attr":{"db":"statistics","numCollectionsDropped":2}}
{"t":{"$date":"2025-02-25T08:21:36.269+00:00"},"s":"I", "c":"STORAGE", "id":20320, "ctx":"conn38","msg":"createCollection","attr":{"namespace":"READ__ME_TO_RECOVER_YOUR_DATA.README","uuidDisposition":"generated","uuid":{"uuid":{"$uuid":"a2461680-4b12-48a5-ab80-0fe831842292"}},"options":{}}}
{"t":{"$date":"2025-02-25T08:21:36.298+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"conn38","msg":"Index build: done building","attr":{"buildUUID":null,"collectionUUID":{"uuid":{"$uuid":"a2461680-4b12-48a5-ab80-0fe831842292"}},"namespace":"READ__ME_TO_RECOVER_YOUR_DATA.README","index":"_id_","ident":"index-8-14992952572211099706","collectionIdent":"collection-7-14992952572211099706","commitTimestamp":null}}
{"t":{"$date":"2025-02-25T08:21:36.346+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn38","msg":"Connection ended","attr":{"remote":"196.251.91.83:42236","uuid":{"uuid":{"$uuid":"a9b6c653-0583-4f2c-b278-c0f0cdc3f574"}},"connectionId":38,"connectionCount":5}}
{"t":{"$date":"2025-02-25T08:21:36.353+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn39","msg":"Connection ended","attr":{"remote":"196.251.91.83:42252","uuid":{"uuid":{"$uuid":"93559982-715d-4001-b5b4-afe8f9ed10ff"}},"connectionId":39,"connectionCount":4}}
{"t":{"$date":"2025-02-25T08:21:36.354+00:00"},"s":"I", "c":"-", "id":20883, "ctx":"conn37","msg":"Interrupted operation as its client disconnected","attr":{"opId":141313}}
{"t":{"$date":"2025-02-25T08:21:36.354+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn37","msg":"Connection ended","attr":{"remote":"196.251.91.83:42232","uuid":{"uuid":{"$uuid":"1f03dd6b-83db-479d-807c-50e46b926bfe"}},"connectionId":37,"connectionCount":3}}
{"t":{"$date":"2025-02-25T08:22:35.743+00:00"},"s":"I", "c":"WTCHKPT", "id":22430, "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":{"ts_sec":1740471755,"ts_usec":743647,"thread":"1:0x7ffb97db76c0","session_name":"WT_SESSION.checkpoint","category":"WT_VERB_CHECKPOINT_PROGRESS","category_id":7,"verbose_level":"DEBUG_1","verbose_level_id":1,"msg":"saving checkpoint snapshot min: 140, snapshot max: 140 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13"}}}
{"t":{"$date":"2025-02-25T08:22:35.921+00:00"},"s":"I", "c":"STORAGE", "id":22260, "ctx":"TimestampMonitor","msg":"Removing drop-pending idents with drop timestamps before timestamp","attr":{"timestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.921+00:00"},"s":"I", "c":"STORAGE", "id":22237, "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"index-3-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.924+00:00"},"s":"I", "c":"STORAGE", "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"index-3-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.924+00:00"},"s":"I", "c":"STORAGE", "id":22237, "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"collection-2-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.930+00:00"},"s":"I", "c":"STORAGE", "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"collection-2-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.930+00:00"},"s":"I", "c":"STORAGE", "id":22237, "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"index-4-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.937+00:00"},"s":"I", "c":"STORAGE", "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"index-4-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.937+00:00"},"s":"I", "c":"STORAGE", "id":22237, "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"collection-3-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.940+00:00"},"s":"I", "c":"STORAGE", "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"collection-3-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.940+00:00"},"s":"I", "c":"STORAGE", "id":22237, "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"index-6-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.951+00:00"},"s":"I", "c":"STORAGE", "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"index-6-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.952+00:00"},"s":"I", "c":"STORAGE", "id":22237, "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"collection-5-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.957+00:00"},"s":"I", "c":"STORAGE", "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"collection-5-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
I never encountered this issue on my local machine, but it happens in production (Ubuntu server).
What I have checked so far:
- The volume is correctly created: docker volume inspect mongo_production_stats shows the expected mount point (/var/lib/docker/volumes/mongo_production_stats/_data), and the container is running and using that volume (see the commands after this list).
- There are no manual deletions or explicit docker volume rm commands being run.
- Running docker compose logs statistics_mongo does not show any database corruption or crashes.
- Test data disappears daily, even when the container is not restarted.
- I searched for similar questions but could not find a solution.
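For completeness, these are roughly the commands I have been using to verify the volume; the alpine one-liner is just an illustrative way to peek at the files sitting on the volume, any small image with ls would do:

# confirm the named volume exists and find its mount point on the host
docker volume inspect mongo_production_stats

# confirm the running container actually mounts that volume at /data/db
docker inspect -f '{{ json .Mounts }}' $(docker compose ps -q statistics_mongo)

# look at the files stored in the volume from a throwaway container
docker run --rm -v mongo_production_stats:/data/db alpine ls -la /data/db

# tail the MongoDB container logs
docker compose logs --tail=100 statistics_mongo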
My questions:
- What could be causing this unexpected data loss once a day?
- How can I prevent Docker from losing MongoDB data?
- Are there any system logs or tools I can check to identify if Docker is deleting my volume? (Some candidate checks are listed below.)
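For example, I am not sure whether host-side checks like the following are the right place to look for a volume being removed or recreated; these are just candidates, not things I have confirmed help:

# Docker daemon logs on the Ubuntu host
journalctl -u docker --since "24 hours ago"

# volume-related Docker events from the last day
docker events --since 24h --filter type=volume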