ws.js

const crypto = require('crypto');
const storage = require('../storage');
const config = require('../config');
const mozlog = require('../log');
const Limiter = require('../limiter');
const fxa = require('../fxa');
const { statUploadEvent } = require('../amplitude');
const { encryptedSize } = require('../../app/utils');
const { Transform } = require('stream');

const log = mozlog('send.upload');
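
// Handles a single upload over a WebSocket. The first message is a JSON
// description of the upload (fileMetadata, authorization, optional bearer
// token, timeLimit, dlimit); the binary messages that follow carry the
// encrypted file data, terminated by a single zero byte.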
module.exports = function(ws, req) {
  let fileStream;
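
  // 1000 is the WebSocket "normal closure" code; any other code means the
  // client went away mid-upload, so destroy the partially written stream.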
  ws.on('close', e => {
    if (e !== 1000 && fileStream !== undefined) {
      fileStream.destroy();
    }
  });
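
  // Only the first message is handled here; the rest of the socket traffic
  // is consumed through the piped stream created below.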
  ws.once('message', async function(message) {
    try {
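      // newId is the public identifier used in the download URL; owner is a
      // secret returned to the uploader as ownerToken and stored with the
      // file's metadata.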
      const newId = crypto.randomBytes(8).toString('hex');
      const owner = crypto.randomBytes(10).toString('hex');

      const fileInfo = JSON.parse(message);
      const timeLimit = fileInfo.timeLimit || config.default_expire_seconds;
      const dlimit = fileInfo.dlimit || 1;
      const metadata = fileInfo.fileMetadata;
      const auth = fileInfo.authorization;
      const user = await fxa.verify(fileInfo.bearer);
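
      // Firefox Accounts users get the regular limits; anonymous uploads
      // fall back to the stricter anon_* limits.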
      const maxFileSize = user
        ? config.max_file_size
        : config.anon_max_file_size;
      const maxExpireSeconds = user
        ? config.max_expire_seconds
        : config.anon_max_expire_seconds;
      const maxDownloads = user
        ? config.max_downloads
        : config.anon_max_downloads;

      if (config.fxa_required && !user) {
        ws.send(
          JSON.stringify({
            error: 401
          })
        );
        return ws.close();
      }
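
      // Reject requests that are missing metadata or authorization, or whose
      // limits fall outside the allowed range.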
      if (
        !metadata ||
        !auth ||
        timeLimit <= 0 ||
        timeLimit > maxExpireSeconds ||
        dlimit > maxDownloads
      ) {
        ws.send(
          JSON.stringify({
            error: 400
          })
        );
        return ws.close();
      }
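
      // Everything stored alongside the file: the owner token, the
      // client-supplied metadata, the download limit, the key portion of the
      // 'scheme key' Authorization value, and a fresh random nonce.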
      const meta = {
        owner,
        metadata,
        dlimit,
        auth: auth.split(' ')[1],
        nonce: crypto.randomBytes(16).toString('base64')
      };

      const protocol = config.env === 'production' ? 'https' : req.protocol;
      const url = `${protocol}://${req.get('host')}/download/${newId}/`;

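      // Reply with the download URL and owner token before consuming the
      // file data.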
      ws.send(
        JSON.stringify({
          url,
          ownerToken: meta.owner,
          id: newId
        })
      );

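      // Limit the stream to the ciphertext size corresponding to the maximum
      // plaintext size; an over-limit upload surfaces as the 'limit' error
      // mapped to a 413 below.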
      const limiter = new Limiter(encryptedSize(maxFileSize));
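
      // A single zero-byte chunk marks end-of-file; turn it into a stream EOF
      // instead of writing it out as file data.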
      const eof = new Transform({
        transform: function(chunk, encoding, callback) {
          if (chunk.length === 1 && chunk[0] === 0) {
            this.push(null);
          } else {
            this.push(chunk);
          }
          callback();
        }
      });
      const wsStream = ws.constructor.createWebSocketStream(ws);

      fileStream = wsStream.pipe(eof).pipe(limiter); // limiter needs to be the last in the chain
      await storage.set(newId, fileStream, meta, timeLimit);

      if (ws.readyState === 1) {
        // if the socket is closed by a cancelled upload the stream
        // ends without an error so we need to check the state
        // before sending a reply.
        // TODO: we should handle cancelled uploads differently
        // in order to avoid having to check socket state and clean
        // up storage, possibly with an exception that we can catch.
        ws.send(JSON.stringify({ ok: true }));
        statUploadEvent({
          id: newId,
          ip: req.ip,
          owner,
          dlimit,
          timeLimit,
          anonymous: !user,
          size: limiter.length,
          agent: req.ua.browser.name || req.ua.ua.substring(0, 6)
        });
      }
    } catch (e) {
      log.error('upload', e);
      if (ws.readyState === 1) {
        ws.send(
          JSON.stringify({
            error: e === 'limit' ? 413 : 500
          })
        );
      }
    }
    ws.close();
  });
};