file.go

package filesys

import (
	"context"
	"io"
	"os"
	"sort"
	"time"

	"github.com/seaweedfs/fuse"
	"github.com/seaweedfs/fuse/fs"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log"
)

const blockSize = 512
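
// Compile-time checks that *File implements the fuse/fs interfaces used below.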
var _ = fs.Node(&File{})
var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})
var _ = fs.NodeSetattrer(&File{})
var _ = fs.NodeGetxattrer(&File{})
var _ = fs.NodeSetxattrer(&File{})
var _ = fs.NodeRemovexattrer(&File{})
var _ = fs.NodeListxattrer(&File{})
var _ = fs.NodeForgetter(&File{})
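
// File is the in-memory FUSE node for a regular file. It caches the filer
// entry, the resolved visible chunk intervals, and a reader over those chunks,
// and tracks whether attribute changes still need to be written back.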
type File struct {
	Name           string
	dir            *Dir
	wfs            *WFS
	entry          *filer_pb.Entry
	entryViewCache []filer.VisibleInterval
	isOpen         int
	reader         io.ReaderAt
	dirtyMetadata  bool
}

func (file *File) fullpath() util.FullPath {
	return util.NewFullPath(file.dir.FullPath(), file.Name)
}
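
// Attr answers a kernel getattr request from the cached entry, reloading the
// entry from the filer first when the file is not currently open.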
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {

	log.Tracef("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)

	entry := file.entry
	if file.isOpen <= 0 || entry == nil {
		if entry, err = file.maybeLoadEntry(ctx); err != nil {
			return err
		}
	}

	attr.Inode = file.fullpath().AsInode()
	attr.Valid = time.Second
	attr.Mode = os.FileMode(entry.Attributes.FileMode)
	attr.Size = filer.FileSize(entry)
	if file.isOpen > 0 {
		attr.Size = entry.Attributes.FileSize
		log.Tracef("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
	}
	attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
	attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
	attr.Gid = entry.Attributes.Gid
	attr.Uid = entry.Attributes.Uid
	attr.Blocks = attr.Size/blockSize + 1
	attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit)
	if entry.HardLinkCounter > 0 {
		attr.Nlink = uint32(entry.HardLinkCounter)
	}

	return nil
}

func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {

	log.Tracef("file Getxattr %s", file.fullpath())

	entry, err := file.maybeLoadEntry(ctx)
	if err != nil {
		return err
	}

	return getxattr(entry, req, resp)
}
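
// Open acquires a file handle from the WFS and hands its id back to the
// kernel so subsequent reads and writes are routed to that handle.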
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {

	log.Tracef("file %v open %+v", file.fullpath(), req)

	handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)

	resp.Handle = fuse.HandleID(handle.handle)

	log.Tracef("%v file open handle id = %d", file.fullpath(), handle.handle)

	return handle, nil
}
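
// Setattr applies chmod/chown/utimes/truncate requests to the cached entry.
// Shrinking the file trims or drops affected chunks and invalidates the chunk
// view cache; the entry is written to the filer only when the file is not open
// and metadata is dirty.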
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {

	log.Tracef("%v file setattr %+v", file.fullpath(), req)

	_, err := file.maybeLoadEntry(ctx)
	if err != nil {
		return err
	}

	if file.isOpen > 0 {
		file.wfs.handlesLock.Lock()
		fileHandle := file.wfs.handles[file.fullpath().AsInode()]
		file.wfs.handlesLock.Unlock()

		if fileHandle != nil {
			fileHandle.Lock()
			defer fileHandle.Unlock()
		}
	}

	if req.Valid.Size() {

		log.Tracef("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
		if req.Size < filer.FileSize(file.entry) {
			// fmt.Printf("truncate %v \n", fullPath)
			var chunks []*filer_pb.FileChunk
			var truncatedChunks []*filer_pb.FileChunk
			for _, chunk := range file.entry.Chunks {
				int64Size := int64(chunk.Size)
				if chunk.Offset+int64Size > int64(req.Size) {
					// this chunk is truncated
					int64Size = int64(req.Size) - chunk.Offset
					if int64Size > 0 {
						chunks = append(chunks, chunk)
						log.Tracef("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
						chunk.Size = uint64(int64Size)
					} else {
						log.Tracef("truncated whole chunk %+v\n", chunk.GetFileIdString())
						truncatedChunks = append(truncatedChunks, chunk)
					}
				} else {
					// this chunk lies entirely below the new size and is kept unchanged
					chunks = append(chunks, chunk)
				}
			}
			file.entry.Chunks = chunks
			file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), chunks)
			file.reader = nil
			file.wfs.deleteFileChunks(truncatedChunks)
		}
		file.entry.Attributes.FileSize = req.Size
		file.dirtyMetadata = true
	}

	if req.Valid.Mode() {
		file.entry.Attributes.FileMode = uint32(req.Mode)
		file.dirtyMetadata = true
	}

	if req.Valid.Uid() {
		file.entry.Attributes.Uid = req.Uid
		file.dirtyMetadata = true
	}

	if req.Valid.Gid() {
		file.entry.Attributes.Gid = req.Gid
		file.dirtyMetadata = true
	}

	if req.Valid.Crtime() {
		file.entry.Attributes.Crtime = req.Crtime.Unix()
		file.dirtyMetadata = true
	}

	if req.Valid.Mtime() {
		file.entry.Attributes.Mtime = req.Mtime.Unix()
		file.dirtyMetadata = true
	}

	if req.Valid.Handle() {
		// fmt.Printf("file handle => %d\n", req.Handle)
	}

	if file.isOpen > 0 {
		return nil
	}

	if !file.dirtyMetadata {
		return nil
	}

	return file.saveEntry(file.entry)
}

func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {

	log.Tracef("file Setxattr %s: %s", file.fullpath(), req.Name)

	entry, err := file.maybeLoadEntry(ctx)
	if err != nil {
		return err
	}

	if err := setxattr(entry, req); err != nil {
		return err
	}

	return file.saveEntry(entry)
}

func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {

	log.Tracef("file Removexattr %s: %s", file.fullpath(), req.Name)

	entry, err := file.maybeLoadEntry(ctx)
	if err != nil {
		return err
	}

	if err := removexattr(entry, req); err != nil {
		return err
	}

	return file.saveEntry(entry)
}

func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {

	log.Tracef("file Listxattr %s", file.fullpath())

	entry, err := file.maybeLoadEntry(ctx)
	if err != nil {
		return err
	}

	if err := listxattr(entry, req, resp); err != nil {
		return err
	}

	return nil
}

func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	// fsync works at OS level
	// write the file chunks to the filerGrpcAddress
	log.Tracef("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)

	return nil
}
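
// Forget drops this node from the wfs fsNodeCache when the kernel evicts it.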
func (file *File) Forget() {
	t := util.NewFullPath(file.dir.FullPath(), file.Name)
	log.Tracef("Forget file %s", t)
	file.wfs.fsNodeCache.DeleteFsNode(t)
}
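
// maybeLoadEntry returns the cached entry when the file is open or already
// loaded; hard-linked entries (and missing ones) are refetched from the filer.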
func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) {

	entry = file.entry
	if file.isOpen > 0 {
		return entry, nil
	}
	if entry != nil {
		if len(entry.HardLinkId) == 0 {
			// only hard links are always reloaded
			return entry, nil
		}
	}
	entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
	if err != nil {
		log.Tracef("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
		return entry, err
	}
	if entry != nil {
		file.setEntry(entry)
	} else {
		log.Warnf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
	}
	return entry, nil
}
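
// lessThan orders chunks by modification time, breaking ties by file key.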
func lessThan(a, b *filer_pb.FileChunk) bool {
	if a.Mtime == b.Mtime {
		return a.Fid.FileKey < b.Fid.FileKey
	}
	return a.Mtime < b.Mtime
}
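
// addChunks merges newly written chunks into the entry. Existing chunks that
// sort after the earliest incoming chunk are re-merged together with the new
// ones, in mtime order, so the visible-interval cache stays consistent.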
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {

	// find the earliest incoming chunk
	newChunks := chunks
	earliestChunk := newChunks[0]
	for i := 1; i < len(newChunks); i++ {
		if lessThan(newChunks[i], earliestChunk) {
			earliestChunk = newChunks[i]
		}
	}

	// pick out-of-order chunks from existing chunks
	for _, chunk := range file.entry.Chunks {
		if lessThan(earliestChunk, chunk) {
			chunks = append(chunks, chunk)
		}
	}

	// sort incoming chunks
	sort.Slice(chunks, func(i, j int) bool {
		return lessThan(chunks[i], chunks[j])
	})

	// add to entry view cache
	for _, chunk := range chunks {
		file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
	}

	file.reader = nil

	log.Tracef("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))

	file.entry.Chunks = append(file.entry.Chunks, newChunks...)
}
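
// setEntry replaces the cached entry and rebuilds the visible chunk intervals,
// invalidating any existing reader.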
func (file *File) setEntry(entry *filer_pb.Entry) {
	file.entry = entry
	file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(file.wfs), entry.Chunks)
	file.reader = nil
}

func (file *File) clearEntry() {
	file.entry = nil
	file.entryViewCache = nil
	file.reader = nil
}
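
// saveEntry sends an UpdateEntry request to the filer and mirrors the change
// into the local meta cache, returning EIO to the kernel on failure.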
func (file *File) saveEntry(entry *filer_pb.Entry) error {
	return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		file.wfs.mapPbIdFromLocalToFiler(entry)
		defer file.wfs.mapPbIdFromFilerToLocal(entry)

		request := &filer_pb.UpdateEntryRequest{
			Directory:  file.dir.FullPath(),
			Entry:      entry,
			Signatures: []int32{file.wfs.signature},
		}

		log.Tracef("save file entry: %v", request)
		_, err := client.UpdateEntry(context.Background(), request)
		if err != nil {
			log.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
			return fuse.EIO
		}

		file.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

		return nil
	})
}