volume_read_write.go 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412
  1. package storage
import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/backend"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
  15. var ErrorNotFound = errors.New("not found")
  16. var ErrorDeleted = errors.New("already deleted")
  17. var ErrorSizeMismatch = errors.New("size mismatch")
  18. func (v *Volume) checkReadWriteError(err error) {
  19. if err == nil {
  20. if v.lastIoError != nil {
  21. v.lastIoError = nil
  22. }
  23. return
  24. }
  25. if err.Error() == "input/output error" {
  26. v.lastIoError = err
  27. }
  28. }
  29. // isFileUnchanged checks whether this needle to write is same as last one.
  30. // It requires serialized access in the same volume.
  31. func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
  32. if v.Ttl.String() != "" {
  33. return false
  34. }
  35. nv, ok := v.nm.Get(n.Id)
  36. if ok && !nv.Offset.IsZero() && nv.Size.IsValid() {
  37. oldNeedle := new(needle.Needle)
  38. err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), nv.Size, v.Version())
  39. if err != nil {
  40. glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToActualOffset(), nv.Size, err)
  41. return false
  42. }
  43. if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
  44. n.DataSize = oldNeedle.DataSize
  45. return true
  46. }
  47. }
  48. return false
  49. }
  50. // Destroy removes everything related to this volume
  51. func (v *Volume) Destroy() (err error) {
  52. if v.isCompacting {
  53. err = fmt.Errorf("volume %d is compacting", v.Id)
  54. return
  55. }
  56. close(v.asyncRequestsChan)
  57. storageName, storageKey := v.RemoteStorageNameKey()
  58. if v.HasRemoteFile() && storageName != "" && storageKey != "" {
  59. if backendStorage, found := backend.BackendStorages[storageName]; found {
  60. backendStorage.DeleteFile(storageKey)
  61. }
  62. }
  63. v.Close()
  64. removeVolumeFiles(v.DataFileName())
  65. removeVolumeFiles(v.IndexFileName())
  66. return
  67. }
  68. func removeVolumeFiles(filename string) {
  69. // basic
  70. os.Remove(filename + ".dat")
  71. os.Remove(filename + ".idx")
  72. os.Remove(filename + ".vif")
  73. // sorted index file
  74. os.Remove(filename + ".sdx")
  75. // compaction
  76. os.Remove(filename + ".cpd")
  77. os.Remove(filename + ".cpx")
  78. // level db indx file
  79. os.RemoveAll(filename + ".ldb")
  80. // marker for damaged or incomplete volume
  81. os.Remove(filename + ".note")
  82. }
// asyncRequestAppend enqueues a write/delete request for the background
// worker started by startWorker. Blocks if the channel buffer is full;
// panics if called after Destroy closed the channel.
func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
	v.asyncRequestsChan <- request
}
  86. func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
  87. // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
  88. actualSize := needle.GetActualSize(Size(len(n.Data)), v.Version())
  89. v.dataFileAccessLock.Lock()
  90. defer v.dataFileAccessLock.Unlock()
  91. if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
  92. err = fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
  93. return
  94. }
  95. return v.doWriteRequest(n)
  96. }
  97. func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) {
  98. // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
  99. if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
  100. n.SetHasTtl()
  101. n.Ttl = v.Ttl
  102. }
  103. if !fsync {
  104. return v.syncWrite(n)
  105. } else {
  106. asyncRequest := needle.NewAsyncRequest(n, true)
  107. // using len(n.Data) here instead of n.Size before n.Size is populated in n.Append()
  108. asyncRequest.ActualSize = needle.GetActualSize(Size(len(n.Data)), v.Version())
  109. v.asyncRequestAppend(asyncRequest)
  110. offset, _, isUnchanged, err = asyncRequest.WaitComplete()
  111. return
  112. }
  113. }
// doWriteRequest appends needle n to the data file and records it in the
// needle map. The caller must hold dataFileAccessLock (see syncWrite and
// startWorker). Returns the append offset, the stored size, and whether
// the write was skipped because the content was unchanged.
func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
	// Skip the append entirely if the stored needle is identical.
	if v.isFileUnchanged(n) {
		size = Size(n.DataSize)
		isUnchanged = true
		return
	}
	// check whether existing needle cookie matches
	nv, ok := v.nm.Get(n.Id)
	if ok {
		existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToActualOffset())
		if existingNeedleReadErr != nil {
			err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
			return
		}
		// A cookie mismatch means the caller does not own this file id.
		if existingNeedle.Cookie != n.Cookie {
			glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
			err = fmt.Errorf("mismatching cookie %x", n.Cookie)
			return
		}
	}
	// append to dat file
	n.AppendAtNs = uint64(time.Now().UnixNano())
	offset, size, _, err = n.Append(v.DataBackend, v.Version())
	v.checkReadWriteError(err)
	if err != nil {
		return
	}
	v.lastAppendAtNs = n.AppendAtNs
	// add to needle map, but only when this append is not behind an
	// already-recorded newer offset for the same id
	if !ok || uint64(nv.Offset.ToActualOffset()) < offset {
		if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
			glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
		}
	}
	if v.lastModifiedTsSeconds < n.LastModified {
		v.lastModifiedTsSeconds = n.LastModified
	}
	return
}
  154. func (v *Volume) syncDelete(n *needle.Needle) (Size, error) {
  155. // glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
  156. actualSize := needle.GetActualSize(0, v.Version())
  157. v.dataFileAccessLock.Lock()
  158. defer v.dataFileAccessLock.Unlock()
  159. if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
  160. err := fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
  161. return 0, err
  162. }
  163. return v.doDeleteRequest(n)
  164. }
  165. func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) {
  166. // todo: delete info is always appended no fsync, it may need fsync in future
  167. fsync := false
  168. if !fsync {
  169. return v.syncDelete(n)
  170. } else {
  171. asyncRequest := needle.NewAsyncRequest(n, false)
  172. asyncRequest.ActualSize = needle.GetActualSize(0, v.Version())
  173. v.asyncRequestAppend(asyncRequest)
  174. _, size, _, err := asyncRequest.WaitComplete()
  175. return Size(size), err
  176. }
  177. }
// doDeleteRequest appends a deletion tombstone for n and marks the entry
// deleted in the needle map. The caller must hold dataFileAccessLock (see
// syncDelete and startWorker). Returns the size of the deleted entry, or
// 0 when the needle does not exist or is already deleted.
func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) {
	glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
	nv, ok := v.nm.Get(n.Id)
	// fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
	if ok && nv.Size.IsValid() {
		size := nv.Size
		// The tombstone is an appended needle with no data.
		n.Data = nil
		n.AppendAtNs = uint64(time.Now().UnixNano())
		offset, _, _, err := n.Append(v.DataBackend, v.Version())
		v.checkReadWriteError(err)
		if err != nil {
			return size, err
		}
		v.lastAppendAtNs = n.AppendAtNs
		// Mark deleted in the needle map only after the tombstone is on disk.
		if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
			return size, err
		}
		return size, err
	}
	return 0, nil
}
// read fills in Needle content by looking up n.Id from NeedleMapper.
// Returns the number of bytes read, ErrorNotFound when the id is absent or
// the entry's TTL has expired, and ErrorDeleted for deleted entries (unless
// readOption allows reading them).
func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, error) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	nv, ok := v.nm.Get(n.Id)
	if !ok || nv.Offset.IsZero() {
		return -1, ErrorNotFound
	}
	readSize := nv.Size
	if readSize.IsDeleted() {
		// Deleted-but-not-tombstoned entries can still be read when the
		// caller explicitly opts in.
		if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
			glog.V(3).Infof("reading deleted %s", n.String())
			// deleted entries carry a negated size; flip it back to read the data
			readSize = -readSize
		} else {
			return -1, ErrorDeleted
		}
	}
	if readSize == 0 {
		return 0, nil
	}
	err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version())
	if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
		// NOTE(review): with 4-byte offsets this retries one addressing window
		// higher — presumably to handle offsets that wrapped past
		// MaxPossibleVolumeSize; confirm against the offset encoding.
		err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version())
	}
	v.checkReadWriteError(err)
	if err != nil {
		return 0, err
	}
	bytesRead := len(n.Data)
	if !n.HasTtl() {
		return bytesRead, nil
	}
	ttlMinutes := n.Ttl.Minutes()
	if ttlMinutes == 0 {
		return bytesRead, nil
	}
	if !n.HasLastModifiedDate() {
		return bytesRead, nil
	}
	// Expired TTL entries are reported as not found.
	if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) {
		return bytesRead, nil
	}
	return -1, ErrorNotFound
}
// startWorker launches the background goroutine that services
// asyncRequestsChan: it drains queued write/delete requests into batches,
// applies each batch under dataFileAccessLock, syncs the backend once per
// batch, and rolls the batch back (truncating the data file) if the sync
// fails. The goroutine exits when the channel is closed (see Destroy).
func (v *Volume) startWorker() {
	go func() {
		chanClosed := false
		for {
			// chan closed. go thread will exit
			if chanClosed {
				break
			}
			currentRequests := make([]*needle.AsyncRequest, 0, 128)
			currentBytesToWrite := int64(0)
			// Gather a batch of requests from the channel.
			for {
				request, ok := <-v.asyncRequestsChan
				// volume may be closed
				if !ok {
					chanClosed = true
					break
				}
				// Reject a request that would push the volume past its size
				// limit; the already-gathered batch still proceeds.
				if MaxPossibleVolumeSize < v.ContentSize()+uint64(currentBytesToWrite+request.ActualSize) {
					request.Complete(0, 0, false,
						fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.ContentSize()))
					break
				}
				currentRequests = append(currentRequests, request)
				currentBytesToWrite += request.ActualSize
				// submit at most 4M bytes or 128 requests at one time to decrease request delay.
				// it also need to break if there is no data in channel to avoid io hang.
				if currentBytesToWrite >= 4*1024*1024 || len(currentRequests) >= 128 || len(v.asyncRequestsChan) == 0 {
					break
				}
			}
			if len(currentRequests) == 0 {
				continue
			}
			v.dataFileAccessLock.Lock()
			// Remember the current end of file so a failed sync can be rolled back.
			end, _, e := v.DataBackend.GetStat()
			if e != nil {
				for i := 0; i < len(currentRequests); i++ {
					currentRequests[i].Complete(0, 0, false,
						fmt.Errorf("cannot read current volume position: %v", e))
				}
				v.dataFileAccessLock.Unlock()
				continue
			}
			// Apply every request in the batch.
			for i := 0; i < len(currentRequests); i++ {
				if currentRequests[i].IsWriteRequest {
					offset, size, isUnchanged, err := v.doWriteRequest(currentRequests[i].N)
					currentRequests[i].UpdateResult(offset, uint64(size), isUnchanged, err)
				} else {
					size, err := v.doDeleteRequest(currentRequests[i].N)
					currentRequests[i].UpdateResult(0, uint64(size), false, err)
				}
			}
			// if sync error, data is not reliable, we should mark the completed request as fail and rollback
			if err := v.DataBackend.Sync(); err != nil {
				// todo: this may generate dirty data or cause data inconsistent, may be weed need to panic?
				if te := v.DataBackend.Truncate(end); te != nil {
					glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
				}
				for i := 0; i < len(currentRequests); i++ {
					if currentRequests[i].IsSucceed() {
						currentRequests[i].UpdateResult(0, 0, false, err)
					}
				}
			}
			// Wake up every waiter with its final result.
			for i := 0; i < len(currentRequests); i++ {
				currentRequests[i].Submit()
			}
			v.dataFileAccessLock.Unlock()
		}
	}()
}
// VolumeFileScanner is the visitor interface used by ScanVolumeFile and
// ScanVolumeFileFrom to walk a volume data file needle by needle.
type VolumeFileScanner interface {
	// VisitSuperBlock is called once with the volume's super block before scanning starts.
	VisitSuperBlock(super_block.SuperBlock) error
	// ReadNeedleBody reports whether needle bodies should be read and passed to VisitNeedle.
	ReadNeedleBody() bool
	// VisitNeedle is called for every needle; needleBody may be nil when
	// ReadNeedleBody returned false or the body read failed.
	VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error
}
  319. func ScanVolumeFile(dirname string, collection string, id needle.VolumeId,
  320. needleMapKind NeedleMapKind,
  321. volumeFileScanner VolumeFileScanner) (err error) {
  322. var v *Volume
  323. if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil {
  324. return fmt.Errorf("failed to load volume %d: %v", id, err)
  325. }
  326. if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil {
  327. return fmt.Errorf("failed to process volume %d super block: %v", id, err)
  328. }
  329. defer v.Close()
  330. version := v.Version()
  331. offset := int64(v.SuperBlock.BlockSize())
  332. return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner)
  333. }
// ScanVolumeFileFrom walks the needles in datBackend starting at offset,
// invoking volumeFileScanner for each one. Returns nil on clean EOF or when
// the scanner returns io.EOF; a failed body read is logged but does not stop
// the scan (the scanner then receives a possibly-nil body).
func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) {
	n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset)
	if e != nil {
		if e == io.EOF {
			return nil
		}
		return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e)
	}
	for n != nil {
		var needleBody []byte
		if volumeFileScanner.ReadNeedleBody() {
			// println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest)
			if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil {
				glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
				// err = fmt.Errorf("cannot read needle body: %v", err)
				// return
			}
		}
		// NOTE: this err shadows the named return; every exit below is explicit,
		// so the shadowing is harmless.
		err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			glog.V(0).Infof("visit needle error: %v", err)
			return fmt.Errorf("visit needle error: %v", err)
		}
		// Advance past this needle's header plus its body.
		offset += NeedleHeaderSize + rest
		glog.V(4).Infof("==> new entry offset %d", offset)
		if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
			if err == io.EOF {
				return nil
			}
			return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err)
		}
		glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
	}
	return nil
}