
I am running a MongoDB instance inside a Docker container using docker-compose. My data is getting deleted once a day, even though I am using named volumes.

Here is my docker-compose.yml file:

version: '3'

volumes:
  production_announcements_postgres: {}
  production_announcements_postgres_backups: {}
  production_users_postgres: {}
  production_users_postgres_backups: {}
  mongo_production_stats: {}
  production_staticfiles: {}
  production_media: {}

services:
  admin:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/admin/compose/production/Dockerfile
    image: ustoz_production_admin
    depends_on:
      - redis
      - announcements_postgres
      - users_postgres
    volumes:
      - production_staticfiles:/app/staticfiles
      - production_media:/app/apps/media
    env_file:
      - ./images/admin/.envs/.production/.admin
      - ./images/admin/.envs/.production/.postgres
    ports:
      - '8000:8000'
    command: /start
  
  announcements_api:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/announcements/compose/production/api/Dockerfile
    image: production_announcement_api
    depends_on:
      - announcements_postgres
      - users_postgres
      - redis
    env_file:
      - ./images/announcements/.envs/.production/.api
      - ./images/announcements/.envs/.production/.postgres
    ports:
      - '8080:8080'
    command: /start
  
  users_api: &users_api
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/users/compose/production/api/Dockerfile
    image: ustoz_production_users_api
    depends_on:
      - users_postgres
      - announcements_postgres
      - redis
    volumes:
      - production_media:/app/src/media
    env_file:
      - ./images/users/.envs/.production/.api
      - ./images/users/.envs/.production/.postgres
    ports:
      - '5000:5000'
    command: /start

  statistics_api:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/statistics/compose/production/api/Dockerfile
    image: production_statistics_api
    depends_on:
      - announcements_postgres
      - users_postgres
      - redis
      - statistics_mongo
    env_file:
      - ./images/statistics/.envs/.production/
    ports:
      - '8081:8081'
    command: /start

  announcements_postgres:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/announcements/compose/production/postgres/Dockerfile
    image: production_announcements_postgres
    volumes:
      - production_announcements_postgres:/var/lib/postgresql/data
      - production_announcements_postgres_backups:/backups
    env_file:
      - ./images/announcements/.envs/.production/.postgres

  users_postgres:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/users/compose/production/postgres/Dockerfile
    image: ustoz_production_users_postgres
    volumes:
      - production_users_postgres:/var/lib/postgresql/data
      - production_users_postgres_backups:/backups
    env_file:
      - ./images/users/.envs/.production/.postgres

  statistics_mongo:
    restart: unless-stopped
    image: mongo:latest
    ports:
      - '27017:27017'
    volumes:
      - mongo_production_stats:/data/db

  redis:
    restart: unless-stopped
    image: docker.io/redis:6

  celeryworker:
    <<: *users_api
    image: production_celeryworker
    depends_on:
      - redis
      - users_postgres
      - announcements_postgres
    ports: []
    command: /start-celeryworker

  nginx:
    restart: unless-stopped
    build:
      context: .
      dockerfile: ./images/nginx/Dockerfile
    image: production_nginx
    ports:
      - '8008:80'
    depends_on:
      - admin
      - announcements_api
      - users_api
      - statistics_api
    volumes:
      - production_staticfiles:/usr/share/nginx/staticfiles:ro
      - production_media:/usr/share/nginx/media:ro

Here are the last logs from the MongoDB container:


{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I",  "c":"COMMAND",  "id":20337,   "ctx":"conn38","msg":"dropDatabase - starting","attr":{"db":"local"}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I",  "c":"COMMAND",  "id":20338,   "ctx":"conn38","msg":"dropDatabase - dropping collection","attr":{"db":"local","namespace":"local.startup_log"}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I",  "c":"STORAGE",  "id":22206,   "ctx":"conn38","msg":"Deferring table drop for index","attr":{"index":"_id_","namespace":"local.startup_log","uuid":{"uuid":{"$uuid":"22444b18-7149-4555-9472-c6ad651eda76"}},"ident":"index-3-10455057658662013419","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I",  "c":"STORAGE",  "id":22214,   "ctx":"conn38","msg":"Deferring table drop for collection","attr":{"namespace":"local.startup_log","ident":"collection-2-10455057658662013419","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.169+00:00"},"s":"I",  "c":"COMMAND",  "id":20336,   "ctx":"conn38","msg":"dropDatabase","attr":{"db":"local","numCollectionsDropped":1}}
{"t":{"$date":"2025-02-25T08:21:36.220+00:00"},"s":"I",  "c":"COMMAND",  "id":20337,   "ctx":"conn38","msg":"dropDatabase - starting","attr":{"db":"statistics"}}
{"t":{"$date":"2025-02-25T08:21:36.220+00:00"},"s":"I",  "c":"COMMAND",  "id":20338,   "ctx":"conn38","msg":"dropDatabase - dropping collection","attr":{"db":"statistics","namespace":"statistics.AccountViews"}}
{"t":{"$date":"2025-02-25T08:21:36.220+00:00"},"s":"I",  "c":"COMMAND",  "id":20338,   "ctx":"conn38","msg":"dropDatabase - dropping collection","attr":{"db":"statistics","namespace":"statistics.AnnouncementViews"}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I",  "c":"STORAGE",  "id":22206,   "ctx":"conn38","msg":"Deferring table drop for index","attr":{"index":"_id_","namespace":"statistics.AccountViews","uuid":{"uuid":{"$uuid":"105f8210-2b06-4be1-87ad-753523b92119"}},"ident":"index-4-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I",  "c":"STORAGE",  "id":22214,   "ctx":"conn38","msg":"Deferring table drop for collection","attr":{"namespace":"statistics.AccountViews","ident":"collection-3-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I",  "c":"STORAGE",  "id":22206,   "ctx":"conn38","msg":"Deferring table drop for index","attr":{"index":"_id_","namespace":"statistics.AnnouncementViews","uuid":{"uuid":{"$uuid":"b02238d0-121b-4b82-8ec1-8f040e84734b"}},"ident":"index-6-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I",  "c":"STORAGE",  "id":22214,   "ctx":"conn38","msg":"Deferring table drop for collection","attr":{"namespace":"statistics.AnnouncementViews","ident":"collection-5-14992952572211099706","dropTime":{"checkpointIteration":"64"}}}
{"t":{"$date":"2025-02-25T08:21:36.221+00:00"},"s":"I",  "c":"COMMAND",  "id":20336,   "ctx":"conn38","msg":"dropDatabase","attr":{"db":"statistics","numCollectionsDropped":2}}
{"t":{"$date":"2025-02-25T08:21:36.269+00:00"},"s":"I",  "c":"STORAGE",  "id":20320,   "ctx":"conn38","msg":"createCollection","attr":{"namespace":"READ__ME_TO_RECOVER_YOUR_DATA.README","uuidDisposition":"generated","uuid":{"uuid":{"$uuid":"a2461680-4b12-48a5-ab80-0fe831842292"}},"options":{}}}
{"t":{"$date":"2025-02-25T08:21:36.298+00:00"},"s":"I",  "c":"INDEX",    "id":20345,   "ctx":"conn38","msg":"Index build: done building","attr":{"buildUUID":null,"collectionUUID":{"uuid":{"$uuid":"a2461680-4b12-48a5-ab80-0fe831842292"}},"namespace":"READ__ME_TO_RECOVER_YOUR_DATA.README","index":"_id_","ident":"index-8-14992952572211099706","collectionIdent":"collection-7-14992952572211099706","commitTimestamp":null}}
{"t":{"$date":"2025-02-25T08:21:36.346+00:00"},"s":"I",  "c":"NETWORK",  "id":22944,   "ctx":"conn38","msg":"Connection ended","attr":{"remote":"196.251.91.83:42236","uuid":{"uuid":{"$uuid":"a9b6c653-0583-4f2c-b278-c0f0cdc3f574"}},"connectionId":38,"connectionCount":5}}
{"t":{"$date":"2025-02-25T08:21:36.353+00:00"},"s":"I",  "c":"NETWORK",  "id":22944,   "ctx":"conn39","msg":"Connection ended","attr":{"remote":"196.251.91.83:42252","uuid":{"uuid":{"$uuid":"93559982-715d-4001-b5b4-afe8f9ed10ff"}},"connectionId":39,"connectionCount":4}}
{"t":{"$date":"2025-02-25T08:21:36.354+00:00"},"s":"I",  "c":"-",        "id":20883,   "ctx":"conn37","msg":"Interrupted operation as its client disconnected","attr":{"opId":141313}}
{"t":{"$date":"2025-02-25T08:21:36.354+00:00"},"s":"I",  "c":"NETWORK",  "id":22944,   "ctx":"conn37","msg":"Connection ended","attr":{"remote":"196.251.91.83:42232","uuid":{"uuid":{"$uuid":"1f03dd6b-83db-479d-807c-50e46b926bfe"}},"connectionId":37,"connectionCount":3}}
{"t":{"$date":"2025-02-25T08:22:35.743+00:00"},"s":"I",  "c":"WTCHKPT",  "id":22430,   "ctx":"Checkpointer","msg":"WiredTiger message","attr":{"message":{"ts_sec":1740471755,"ts_usec":743647,"thread":"1:0x7ffb97db76c0","session_name":"WT_SESSION.checkpoint","category":"WT_VERB_CHECKPOINT_PROGRESS","category_id":7,"verbose_level":"DEBUG_1","verbose_level_id":1,"msg":"saving checkpoint snapshot min: 140, snapshot max: 140 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 13"}}}
{"t":{"$date":"2025-02-25T08:22:35.921+00:00"},"s":"I",  "c":"STORAGE",  "id":22260,   "ctx":"TimestampMonitor","msg":"Removing drop-pending idents with drop timestamps before timestamp","attr":{"timestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.921+00:00"},"s":"I",  "c":"STORAGE",  "id":22237,   "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"index-3-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.924+00:00"},"s":"I",  "c":"STORAGE",  "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"index-3-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.924+00:00"},"s":"I",  "c":"STORAGE",  "id":22237,   "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"collection-2-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.930+00:00"},"s":"I",  "c":"STORAGE",  "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"collection-2-10455057658662013419","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.930+00:00"},"s":"I",  "c":"STORAGE",  "id":22237,   "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"index-4-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.937+00:00"},"s":"I",  "c":"STORAGE",  "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"index-4-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.937+00:00"},"s":"I",  "c":"STORAGE",  "id":22237,   "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"collection-3-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.940+00:00"},"s":"I",  "c":"STORAGE",  "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"collection-3-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.940+00:00"},"s":"I",  "c":"STORAGE",  "id":22237,   "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"index-6-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.951+00:00"},"s":"I",  "c":"STORAGE",  "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"index-6-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.952+00:00"},"s":"I",  "c":"STORAGE",  "id":22237,   "ctx":"TimestampMonitor","msg":"Completing drop for ident","attr":{"ident":"collection-5-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2025-02-25T08:22:35.957+00:00"},"s":"I",  "c":"STORAGE",  "id":6776600, "ctx":"TimestampMonitor","msg":"The ident was successfully dropped","attr":{"ident":"collection-5-14992952572211099706","dropTimestamp":{"$timestamp":{"t":0,"i":0}}}}

I never encountered this issue on my local machine, but it happens in production (Ubuntu server).

What I have checked so far:

  1. The volume is correctly created (the exact commands for checks 1, 2 and 4 are shown after this list):
    docker volume inspect mongo_production_stats
    shows the correct mount point:
    /var/lib/docker/volumes/mongo_production_stats/_data
  2. The container is running and using the correct volume.
  3. There are no manual deletions or explicit docker volume rm commands being run.
  4. Running docker compose logs statistics_mongo does not show any database corruption or crashes.
  5. Test data disappears daily, even when the container is not restarted.
  6. Searched for similar questions but could not find any solution.
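
For reference, here is a sketch of the exact commands behind checks 1, 2 and 4. The container name below is an assumption (the compose default for my project); use whatever docker compose ps reports:

    # check 1: the named volume exists and has the expected mount point
    docker volume inspect mongo_production_stats

    # check 2: the running container actually mounts that volume
    # (container name assumed; take the real one from `docker compose ps`)
    docker inspect --format '{{ json .Mounts }}' ustoz-statistics_mongo-1

    # check 4: the service logs show no corruption or crashes
    docker compose logs statistics_mongo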

My questions:

  1. What could be causing this unexpected data loss once a day?
  2. How can I prevent Docker from losing MongoDB data?
  3. Are there any system logs or tools I can check to identify if Docker is deleting my volume?
  • Is it just a very specific set of data that is 'disappearing' every day? You specifically mention "test data" - is there other data that is NOT disappearing? Is the instance/cluster stable (e.g. not crashing)? When you say there are "no manual deletions", are you referring to the data files themselves (as opposed to deletions from a client connected to the database)? I would check the mongod log and/or the internal oplog to find out what is happening, particularly if the service/database overall is stable. Commented Feb 20 at 17:55
  • @user20042973 The issue affects all data in the database, not just specific test data; everything gets wiped. The instance itself is stable and doesn't crash or restart unexpectedly. By "no manual deletions," I mean that no one is intentionally deleting data, either via a client or by removing database files directly. I'll check the mongod logs and oplog as you suggested. Do you have any specific pointers on what to look for? Commented Feb 21 at 10:06
  • Both MongoDB and Docker are stable, well-tested products; they don't just randomly lose data by accident. You have clearly overlooked something that is deleting the data. A left-over cron job, or test code in your backend that was accidentally left in place, would be the first thing I looked for. Checking the logs to confirm this would also be one of the first things to do. Commented Feb 21 at 13:23
  • Where is your Ubuntu production server hosted? Commented Feb 22 at 15:03
  • @jQueeny No, Postgres and other data are not being deleted; only the MongoDB data is affected (not its volume directory), and there are no restrictions on the server. Commented Feb 23 at 10:09

1 Answer


You have exposed the MongoDB database to the internet without configuring any security settings. Most likely a "friendly" hacker is reminding you to secure your database by deleting it every day (an unfriendly hacker would do much worse); the READ__ME_TO_RECOVER_YOUR_DATA collection created in your logs is the calling card of exactly this kind of automated attack. Similar attacks have been reported against Redis.

For details on how to secure a mongo database in a container, see the official image documentation on Docker Hub (https://hub.docker.com/_/mongo) and MongoDB's security checklist (https://www.mongodb.com/docs/manual/administration/security-checklist/).
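
As a minimal sketch, assuming only the official mongo image this compose file already uses: its documented MONGO_INITDB_ROOT_USERNAME and MONGO_INITDB_ROOT_PASSWORD variables create a root user and start mongod with authentication enabled. The ${MONGO_ROOT_USER} and ${MONGO_ROOT_PASSWORD} substitutions are placeholders, not part of the original setup:

  statistics_mongo:
    restart: unless-stopped
    image: mongo:latest
    environment:
      # the official image creates this admin user and enables --auth,
      # but only on the first start against an empty /data/db
      MONGO_INITDB_ROOT_USERNAME: ${MONGO_ROOT_USER}
      MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ROOT_PASSWORD}
    volumes:
      - mongo_production_stats:/data/db

After this, clients such as statistics_api must authenticate, with a connection string along the lines of mongodb://user:password@statistics_mongo:27017/statistics?authSource=admin.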

Also consider whether you need to publish the port at all (container-to-container communication works without publishing any ports). You can remove the published ports by deleting that section of the compose file:

  statistics_mongo:
    restart: unless-stopped
    image: mongo:latest
    volumes:
      - mongo_production_stats:/data/db
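
If you do still need to reach mongod from the host itself (for a backup script, say), a sketch of a safer alternative, assuming nothing outside the host needs the port, is to bind the published port to the loopback interface only:

  statistics_mongo:
    restart: unless-stopped
    image: mongo:latest
    ports:
      # published on the host's loopback interface only, so it is
      # reachable from the server but not from the internet
      - '127.0.0.1:27017:27017'
    volumes:
      - mongo_production_stats:/data/db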

5 Comments

  • Thanks for your answer. I've added security configurations, but I just found logs showing the database being dropped. I've updated my question - can you check the logs please?
  • You should probably look at why 196.251.91.83 is allowed to connect to your database at all, and fix that.
  • I think after setting up the security configuration, the data is no longer being deleted, thank you! But I am wondering: how did they know that I am running MongoDB on my server? Nobody knows about my site yet.
  • Everything on the Internet is constantly being scanned.
  • Never run mongod on the default port of 27017. Pick a completely different port, like 22077.
