package azure

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/Azure/azure-pipeline-go/pipeline"
	// Dot import so that the azblob types (BlockBlobURL, UploadToBlockBlobOptions, CommonResponse, ...)
	// and size constants used below can be referenced without a package qualifier.
	. "github.com/Azure/azure-storage-blob-go/azblob"
)

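// uploadReaderAtToBlockBlob uploads the readerSize bytes exposed by reader to blockBlobURL.
// Payloads at or below BlockBlobMaxUploadBlobBytes go up in a single Upload (Put Blob) request;
// larger payloads are staged as parallel StageBlock (Put Block) calls and then committed with
// CommitBlockList (Put Block List).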
func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64,
	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
	if o.BlockSize == 0 {
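		// No block size was specified, so pick one. First make sure the payload can fit in a
		// block blob at all: at most BlockBlobMaxBlocks blocks of BlockBlobMaxStageBlockBytes each.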
		if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
			return nil, errors.New("buffer is too large to upload to a block blob")
		}
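		// A payload that fits in a single Upload call never uses the block size; otherwise,
		// size the blocks so the whole payload fits within BlockBlobMaxBlocks blocks.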
		if readerSize <= BlockBlobMaxUploadBlobBytes {
			o.BlockSize = BlockBlobMaxUploadBlobBytes
		} else {
			// Round up so the block count stays within BlockBlobMaxBlocks even when
			// readerSize is not an exact multiple of it, but never go below the 4 MB default.
			o.BlockSize = (readerSize + BlockBlobMaxBlocks - 1) / BlockBlobMaxBlocks
			if o.BlockSize < BlobDefaultDownloadBlockSize {
				o.BlockSize = BlobDefaultDownloadBlockSize
			}
		}
	}

	if readerSize <= BlockBlobMaxUploadBlobBytes {
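		// Small enough for one Upload (Put Blob) request; no block staging or commit is needed.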
		var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
		if o.Progress != nil {
			body = pipeline.NewRequestBodyProgress(body, o.Progress)
		}
		return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions)
	}

	var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
	blockIDList := make([]string, numBlocks) // base64-encoded block IDs, indexed by block number
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := DoBatchTransfer(ctx, BatchTransferOptions{
		OperationName: "uploadReaderAtToBlockBlob",
		TransferSize:  readerSize,
		ChunkSize:     o.BlockSize,
		Parallelism:   o.Parallelism,
		Operation: func(offset int64, count int64, ctx context.Context) error {
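			// Called once per block, possibly from several goroutines in parallel; offset and
			// count select this block's slice of the reader.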
			var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
			blockNum := offset / o.BlockSize
			if o.Progress != nil {
				blockProgress := int64(0)
				body = pipeline.NewRequestBodyProgress(body,
					func(bytesTransferred int64) {
						diff := bytesTransferred - blockProgress
						blockProgress = bytesTransferred
						progressLock.Lock() // only one goroutine reports progress at a time
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}

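			// Use a random UUID as the block ID so that two clients staging blocks against the
			// same blob cannot mix their blocks together at commit time.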
			blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
			_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions)
			return err
		},
	})
	if err != nil {
		return nil, err
	}

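	// Every block was staged successfully; commit the ordered block list to assemble the blob.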
	return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions)
}

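// Markers for the four UUID variants (NCS, RFC 4122, Microsoft, reserved for future use).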
const (
	reservedNCS       byte = 0x80
	reservedRFC4122   byte = 0x40
	reservedMicrosoft byte = 0x20
	reservedFuture    byte = 0x00
)

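// uuid is a 16-byte universally unique identifier.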
type uuid [16]byte

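// newUUID returns a new version 4 (random) UUID.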
func newUUID() (u uuid) {
	u = uuid{}
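	// Fill all 16 bytes from crypto/rand. The error return is ignored here, so a failing
	// entropy source would silently yield a low-entropy ID.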
	rand.Read(u[:])
	u[8] = (u[8] | reservedRFC4122) & 0x7F // stamp the variant field
	var version byte = 4
	u[6] = (u[6] & 0xF) | (version << 4) // set the version nibble to 4
	return
}

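// String renders the UUID in the canonical 8-4-4-4-12 hexadecimal form.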
func (u uuid) String() string {
	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}

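// bytes returns the UUID as a raw 16-byte slice.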
func (u uuid) bytes() []byte {
	return u[:]
}
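
// Usage sketch (illustrative only; ctx, data, and blobURL below are placeholders, not part of
// the original helper). A caller holding the whole payload in memory could drive
// uploadReaderAtToBlockBlob like this, since *bytes.Reader implements io.ReaderAt:
//
//	data := loadPayload() // hypothetical []byte source
//	resp, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)),
//		blobURL, UploadToBlockBlobOptions{Parallelism: 4})
//	if err != nil {
//		// handle the upload failure
//	}
//	_ = resp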