在我的产品环境中,我在GridFS上存储了数百万个文件;实际上我们也开始删除它们以回收空间。
数据库统计:
{
"db" : "Dummy",
"collections" : NumberInt(5),
"views" : NumberInt(0),
"objects" : NumberInt(60344915),
"avgObjSize" : 14560.387591033976,
"dataSize" : 878645351548.0,
"storageSize" : 724202864640.0,
"numExtents" : NumberInt(0),
"indexes" : NumberInt(7),
"indexSize" : 2633293824.0,
"ok" : 1.0
}
集合统计(collection stats):
{
"ns" : "Dummy.Coll1",
"size" : 4708693124.0,
"count" : NumberInt(12412954),
"avgObjSize" : NumberInt(379),
"storageSize" : 1710395392.0,
"capped" : false,
"nindexes" : NumberInt(1),
"indexDetails" : {
"_id_" : {
"metadata" : {
"formatVersion" : NumberInt(8),
"infoObj" : "{ \"v\" : 2, \"key\" : { \"_id\" : 1 }, \"name\" : \"_id_\", \"ns\" : \"Dummy.Coll1\" }"
},
"creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=8,infoObj={ \"v\" : 2, \"key\" : { \"_id\" : 1 }, \"name\" : \"_id_\", \"ns\" : \"SistemiHubBridge.FattureRicevute\" }),block_allocation=best,block_compressor=,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=16k,key_format=u,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=16k,leaf_value_max=0,log=(enabled=true),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_max=15,merge_min=0),memory_page_max=5MB,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=true,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
"type" : "file",
"uri" : "statistics:table:index-10--4068436889899522135",
"LSM" : {
"bloom filter false positives" : NumberInt(0),
"bloom filter hits" : NumberInt(0),
"bloom filter misses" : NumberInt(0),
"bloom filter pages evicted from cache" : NumberInt(0),
"bloom filter pages read into cache" : NumberInt(0),
"bloom filters in the LSM tree" : NumberInt(0),
"chunks in the LSM tree" : NumberInt(0),
"highest merge generation in the LSM tree" : NumberInt(0),
"queries that could have benefited from a Bloom filter that did not exist" : NumberInt(0),
"sleep for LSM checkpoint throttle" : NumberInt(0),
"sleep for LSM merge throttle" : NumberInt(0),
"total size of bloom filters" : NumberInt(0)
},
"block-manager" : {
"allocations requiring file extension" : NumberInt(23804),
"blocks allocated" : NumberInt(512341),
"blocks freed" : NumberInt(197235),
"checkpoint size" : NumberInt(167079936),
"file allocation unit size" : NumberInt(4096),
"file bytes available for reuse" : NumberInt(1155072),
"file magic number" : NumberInt(120897),
"file major version number" : NumberInt(1),
"file size in bytes" : NumberInt(168251392),
"minor version number" : NumberInt(0)
},
"btree" : {
"btree checkpoint generation" : NumberInt(104735),
"column-store fixed-size leaf pages" : NumberInt(0),
"column-store internal pages" : NumberInt(0),
"column-store variable-size RLE encoded values" : NumberInt(0),
"column-store variable-size deleted values" : NumberInt(0),
"column-store variable-size leaf pages" : NumberInt(0),
"fixed-record size" : NumberInt(0),
"maximum internal page key size" : NumberInt(1474),
"maximum internal page size" : NumberInt(16384),
"maximum leaf page key size" : NumberInt(1474),
"maximum leaf page size" : NumberInt(16384),
"maximum leaf page value size" : NumberInt(7372),
"maximum tree depth" : NumberInt(4),
"number of key/value pairs" : NumberInt(0),
"overflow pages" : NumberInt(0),
"pages rewritten by compaction" : NumberInt(0),
"row-store internal pages" : NumberInt(0),
"row-store leaf pages" : NumberInt(0)
},
"cache" : {
"bytes currently in the cache" : NumberInt(247269),
"bytes read into cache" : NumberInt(31710621),
"bytes written from cache" : 2602068426.0,
"checkpoint blocked page eviction" : NumberInt(0),
"data source pages selected for eviction unable to be evicted" : NumberInt(11),
"hazard pointer blocked page eviction" : NumberInt(0),
"in-memory page passed criteria to be split" : NumberInt(184),
"in-memory page splits" : NumberInt(92),
"internal pages evicted" : NumberInt(101),
"internal pages split during eviction" : NumberInt(1),
"leaf pages split during eviction" : NumberInt(1342),
"modified pages evicted" : NumberInt(2843),
"overflow pages read into cache" : NumberInt(0),
"overflow values cached in memory" : NumberInt(0),
"page split during eviction deepened the tree" : NumberInt(1),
"page written requiring lookaside records" : NumberInt(0),
"pages read into cache" : NumberInt(2608),
"pages read into cache requiring lookaside entries" : NumberInt(0),
"pages requested from the cache" : NumberInt(12903606),
"pages written from cache" : NumberInt(309295),
"pages written requiring in-memory restoration" : NumberInt(0),
"tracked dirty bytes in the cache" : NumberInt(175139),
"unmodified pages evicted" : NumberInt(772)
},
"cache_walk" : {
"Average difference between current eviction generation when the page was last considered" : NumberInt(0),
"Average on-disk page image size seen" : NumberInt(0),
"Clean pages currently in cache" : NumberInt(0),
"Current eviction generation" : NumberInt(0),
"Dirty pages currently in cache" : NumberInt(0),
"Entries in the root page" : NumberInt(0),
"Internal pages currently in cache" : NumberInt(0),
"Leaf pages currently in cache" : NumberInt(0),
"Maximum difference between current eviction generation when the page was last considered" : NumberInt(0),
"Maximum page size seen" : NumberInt(0),
"Minimum on-disk page image size seen" : NumberInt(0),
"On-disk page image sizes smaller than a single allocation unit" : NumberInt(0),
"Pages created in memory and never written" : NumberInt(0),
"Pages currently queued for eviction" : NumberInt(0),
"Pages that could not be queued for eviction" : NumberInt(0),
"Refs skipped during cache traversal" : NumberInt(0),
"Size of the root page" : NumberInt(0),
"Total number of pages currently in cache" : NumberInt(0)
},
"compression" : {
"compressed pages read" : NumberInt(0),
"compressed pages written" : NumberInt(0),
"page written failed to compress" : NumberInt(0),
"page written was too small to compress" : NumberInt(0),
"raw compression call failed, additional data available" : NumberInt(0),
"raw compression call failed, no additional data available" : NumberInt(0),
"raw compression call succeeded" : NumberInt(0)
},
"cursor" : {
"bulk-loaded cursor-insert calls" : NumberInt(0),
"create calls" : NumberInt(8675),
"cursor-insert key and value bytes inserted" : NumberInt(220702884),
"cursor-remove key bytes removed" : NumberInt(0),
"cursor-update value bytes updated" : NumberInt(0),
"insert calls" : NumberInt(12267767),
"next calls" : NumberInt(0),
"prev calls" : NumberInt(0),
"remove calls" : NumberInt(0),
"reset calls" : NumberInt(12267769),
"restarted searches" : NumberInt(243),
"search calls" : NumberInt(2),
"search near calls" : NumberInt(0),
"truncate calls" : NumberInt(0),
"update calls" : NumberInt(0)
},
"reconciliation" : {
"dictionary matches" : NumberInt(0),
"fast-path pages deleted" : NumberInt(0),
"internal page key bytes discarded using suffix compression" : NumberInt(16378777),
"internal page multi-block writes" : NumberInt(79547),
"internal-page overflow keys" : NumberInt(0),
"leaf page key bytes discarded using prefix compression" : 14049832267.0,
"leaf page multi-block writes" : NumberInt(89248),
"leaf-page overflow keys" : NumberInt(0),
"maximum blocks required for a page" : NumberInt(2),
"overflow values written" : NumberInt(0),
"page checksum matches" : NumberInt(1740545),
"page reconciliation calls" : NumberInt(288189),
"page reconciliation calls for eviction" : NumberInt(67),
"pages deleted" : NumberInt(0)
},
"session" : {
"object compaction" : NumberInt(0),
"open cursor count" : NumberInt(11)
},
"transaction" : {
"update conflicts" : NumberInt(0)
}
}
},
"totalIndexSize" : NumberInt(168251392),
"indexSizes" : {
"_id_" : NumberInt(168251392)
},
"ok" : 1.0
}
我正在使用Mongo 3.4.15
删除了几百万个文件之后,占用空间大致保持恒定(考虑到同时还有新的插入),但是系统在执行新的删除操作时开始变慢。我的操作是删除 GridFS 文档,并通过批量更新(bulk update)来更新相关记录。
这个问题是否可能是由删除造成的碎片引起的?有什么建议吗?我必须运行 compact(紧凑)命令吗?compact 是否需要让集合/数据库脱机,还是可以作为常规维护或计划内维护来执行?