// ws.js — WebSocket upload handler: receives file metadata and an encrypted
// stream from the client and persists it via the storage backend.
  1. const crypto = require('crypto');
  2. const storage = require('../storage');
  3. const config = require('../config');
  4. const mozlog = require('../log');
  5. const Limiter = require('../limiter');
  6. const fxa = require('../fxa');
  7. const { statUploadEvent } = require('../amplitude');
  8. const { encryptedSize } = require('../../app/utils');
  9. const { Transform } = require('stream');
  10. const log = mozlog('send.upload');
  11. module.exports = function(ws, req) {
  12. let fileStream;
  13. ws.on('close', e => {
  14. if (e !== 1000 && fileStream !== undefined) {
  15. fileStream.destroy();
  16. }
  17. });
  18. ws.once('message', async function(message) {
  19. try {
  20. const newId = crypto.randomBytes(8).toString('hex');
  21. const owner = crypto.randomBytes(10).toString('hex');
  22. const fileInfo = JSON.parse(message);
  23. const timeLimit = fileInfo.timeLimit || config.default_expire_seconds;
  24. const dlimit = fileInfo.dlimit || 1;
  25. const metadata = fileInfo.fileMetadata;
  26. const auth = fileInfo.authorization;
  27. const user = await fxa.verify(fileInfo.bearer);
  28. const maxFileSize = user
  29. ? config.max_file_size
  30. : config.anon_max_file_size;
  31. const maxExpireSeconds = user
  32. ? config.max_expire_seconds
  33. : config.anon_max_expire_seconds;
  34. const maxDownloads = user
  35. ? config.max_downloads
  36. : config.anon_max_downloads;
  37. if (
  38. !metadata ||
  39. !auth ||
  40. timeLimit <= 0 ||
  41. timeLimit > maxExpireSeconds ||
  42. dlimit > maxDownloads
  43. ) {
  44. ws.send(
  45. JSON.stringify({
  46. error: 400
  47. })
  48. );
  49. return ws.close();
  50. }
  51. const meta = {
  52. owner,
  53. metadata,
  54. dlimit,
  55. auth: auth.split(' ')[1],
  56. nonce: crypto.randomBytes(16).toString('base64')
  57. };
  58. const protocol = config.env === 'production' ? 'https' : req.protocol;
  59. const url = `${protocol}://${req.get('host')}/download/${newId}/`;
  60. ws.send(
  61. JSON.stringify({
  62. url,
  63. ownerToken: meta.owner,
  64. id: newId
  65. })
  66. );
  67. const limiter = new Limiter(encryptedSize(maxFileSize));
  68. const eof = new Transform({
  69. transform: function(chunk, encoding, callback) {
  70. if (chunk.length === 1 && chunk[0] === 0) {
  71. this.push(null);
  72. } else {
  73. this.push(chunk);
  74. }
  75. callback();
  76. }
  77. });
  78. const wsStream = ws.constructor.createWebSocketStream(ws);
  79. fileStream = wsStream.pipe(eof).pipe(limiter); // limiter needs to be the last in the chain
  80. await storage.set(newId, fileStream, meta, timeLimit);
  81. if (ws.readyState === 1) {
  82. // if the socket is closed by a cancelled upload the stream
  83. // ends without an error so we need to check the state
  84. // before sending a reply.
  85. // TODO: we should handle cancelled uploads differently
  86. // in order to avoid having to check socket state and clean
  87. // up storage, possibly with an exception that we can catch.
  88. ws.send(JSON.stringify({ ok: true }));
  89. statUploadEvent({
  90. id: newId,
  91. ip: req.ip,
  92. owner,
  93. dlimit,
  94. timeLimit,
  95. anonymous: !user,
  96. size: limiter.length,
  97. agent: req.ua.browser.name || req.ua.ua.substring(0, 6)
  98. });
  99. }
  100. } catch (e) {
  101. log.error('upload', e);
  102. if (ws.readyState === 1) {
  103. ws.send(
  104. JSON.stringify({
  105. error: e === 'limit' ? 413 : 500
  106. })
  107. );
  108. }
  109. }
  110. ws.close();
  111. });
  112. };