topology_vacuum.go

package topology

import (
	"context"
	"io"
	"sync/atomic"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"google.golang.org/grpc"
)
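
// batchVacuumVolumeCheck asks every replica of the volume for its garbage
// ratio in parallel, and collects the locations whose ratio meets the
// threshold. It returns those locations and whether vacuuming should
// proceed: no check errors and at least one qualifying replica.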
func (t *Topology) batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vid needle.VolumeId,
	locationlist *VolumeLocationList, garbageThreshold float64) (*VolumeLocationList, bool) {
	ch := make(chan int, locationlist.Length())
	errCount := int32(0)
	for index, dn := range locationlist.list {
		go func(index int, url pb.ServerAddress, vid needle.VolumeId) {
			err := operation.WithVolumeServerClient(false, url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
				resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{
					VolumeId: uint32(vid),
				})
				if err != nil {
					atomic.AddInt32(&errCount, 1)
					ch <- -1
					return err
				}
				if resp.GarbageRatio >= garbageThreshold {
					ch <- index
				} else {
					ch <- -1
				}
				return nil
			})
			if err != nil {
				glog.V(0).Infof("Checking vacuuming %d on %s: %v", vid, url, err)
			}
		}(index, dn.ServerAddress(), vid)
	}
	vacuumLocationList := NewVolumeLocationList()

	waitTimeout := time.NewTimer(time.Minute * time.Duration(t.volumeSizeLimit/1024/1024/1000+1))
	defer waitTimeout.Stop()

	for range locationlist.list {
		select {
		case index := <-ch:
			if index != -1 {
				vacuumLocationList.list = append(vacuumLocationList.list, locationlist.list[index])
			}
		case <-waitTimeout.C:
			return vacuumLocationList, false
		}
	}
	return vacuumLocationList, errCount == 0 && len(vacuumLocationList.list) > 0
}
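
// batchVacuumVolumeCompact removes the volume from the writable list, then
// asks every replica in parallel to compact itself, streaming progress back
// over gRPC. It returns true only if all replicas finish successfully
// before the size-scaled timeout.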
func (t *Topology) batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId,
	locationlist *VolumeLocationList, preallocate int64) bool {
	vl.accessLock.Lock()
	vl.removeFromWritable(vid)
	vl.accessLock.Unlock()

	ch := make(chan bool, locationlist.Length())
	for index, dn := range locationlist.list {
		go func(index int, url pb.ServerAddress, vid needle.VolumeId) {
			glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
			err := operation.WithVolumeServerClient(true, url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
				stream, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
					VolumeId:    uint32(vid),
					Preallocate: preallocate,
				})
				if err != nil {
					return err
				}
				for {
					resp, recvErr := stream.Recv()
					if recvErr != nil {
						if recvErr == io.EOF {
							break
						} else {
							return recvErr
						}
					}
					glog.V(0).Infof("%d vacuum %d on %s processed %d bytes, loadAvg %.02f%%",
						index, vid, url, resp.ProcessedBytes, resp.LoadAvg_1M*100)
				}
				return nil
			})
			if err != nil {
				glog.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
				ch <- false
			} else {
				glog.V(0).Infof("Complete vacuuming %d on %s", vid, url)
				ch <- true
			}
		}(index, dn.ServerAddress(), vid)
	}
	isVacuumSuccess := true

	waitTimeout := time.NewTimer(3 * time.Minute * time.Duration(t.volumeSizeLimit/1024/1024/1000+1))
	defer waitTimeout.Stop()

	for range locationlist.list {
		select {
		case canCommit := <-ch:
			isVacuumSuccess = isVacuumSuccess && canCommit
		case <-waitTimeout.C:
			return false
		}
	}
	return isVacuumSuccess
}
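
// batchVacuumVolumeCommit asks each replica, one at a time, to swap in its
// compacted copy. If every commit succeeds, the volume is marked available
// again on all locations (read-only if any replica reports so).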
func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) bool {
	isCommitSuccess := true
	isReadOnly := false
	for _, dn := range locationlist.list {
		glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url())
		err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
			resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
				VolumeId: uint32(vid),
			})
			if resp != nil && resp.IsReadOnly {
				isReadOnly = true
			}
			return err
		})
		if err != nil {
			glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
			isCommitSuccess = false
		} else {
			glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
		}
	}
	if isCommitSuccess {
		for _, dn := range locationlist.list {
			vl.SetVolumeAvailable(dn, vid, isReadOnly)
		}
	}
	return isCommitSuccess
}
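
// batchVacuumVolumeCleanup tells each replica to discard the intermediate
// compaction files, used when a compaction did not fully succeed.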
func (t *Topology) batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) {
	for _, dn := range locationlist.list {
		glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
		err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
			_, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
				VolumeId: uint32(vid),
			})
			return err
		})
		if err != nil {
			glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err)
		} else {
			glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url())
		}
	}
}
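
// Vacuum walks the topology, optionally restricted to one collection or one
// volume id, and vacuums every volume whose garbage ratio meets
// garbageThreshold. An atomic counter ensures only one vacuum runs at a time.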
func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float64, volumeId uint32, collection string, preallocate int64) {
	// if there is vacuum going on, return immediately
	swapped := atomic.CompareAndSwapInt64(&t.vacuumLockCounter, 0, 1)
	if !swapped {
		return
	}
	defer atomic.StoreInt64(&t.vacuumLockCounter, 0)

	// now only one vacuum process going on
	glog.V(1).Infof("Start vacuum on demand with threshold: %f collection: %s volumeId: %d",
		garbageThreshold, collection, volumeId)
	for _, col := range t.collectionMap.Items() {
		c := col.(*Collection)
		if collection != "" && collection != c.Name {
			continue
		}
		for _, vl := range c.storageType2VolumeLayout.Items() {
			if vl != nil {
				volumeLayout := vl.(*VolumeLayout)
				if volumeId > 0 {
					vid := needle.VolumeId(volumeId)
					volumeLayout.accessLock.RLock()
					locationList, ok := volumeLayout.vid2location[vid]
					volumeLayout.accessLock.RUnlock()
					if ok {
						t.vacuumOneVolumeId(grpcDialOption, volumeLayout, c, garbageThreshold, locationList, vid, preallocate)
					}
				} else {
					t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
				}
			}
		}
	}
}
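
// vacuumOneVolumeLayout snapshots the layout's volume locations under the
// read lock, then vacuums each volume without holding the lock.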
func (t *Topology) vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) {
	volumeLayout.accessLock.RLock()
	tmpMap := make(map[needle.VolumeId]*VolumeLocationList)
	for vid, locationList := range volumeLayout.vid2location {
		tmpMap[vid] = locationList.Copy()
	}
	volumeLayout.accessLock.RUnlock()

	for vid, locationList := range tmpMap {
		t.vacuumOneVolumeId(grpcDialOption, volumeLayout, c, garbageThreshold, locationList, vid, preallocate)
	}
}
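
// vacuumOneVolumeId runs the check/compact/commit sequence for a single
// volume, skipping volumes that are read-only or under-replicated; a failed
// compaction is cleaned up instead of committed.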
func (t *Topology) vacuumOneVolumeId(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, locationList *VolumeLocationList, vid needle.VolumeId, preallocate int64) {
	volumeLayout.accessLock.RLock()
	isReadOnly := volumeLayout.readonlyVolumes.IsTrue(vid)
	isEnoughCopies := volumeLayout.enoughCopies(vid)
	volumeLayout.accessLock.RUnlock()

	if isReadOnly || !isEnoughCopies {
		return
	}

	glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
	if vacuumLocationList, needVacuum := t.batchVacuumVolumeCheck(
		grpcDialOption, vid, locationList, garbageThreshold); needVacuum {
		if t.batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) {
			t.batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, vacuumLocationList)
		} else {
			t.batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, vacuumLocationList)
		}
	}
}