videoPipeline.cpp
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm

#include "videoPipeline.hpp"

#include "buffer.hpp"

#include "stitchOutput/stitchOutput.hpp"
#include "gpu/buffer.hpp"
#include "gpu/memcpy.hpp"
#include "gpu/stream.hpp"
#include "image/unpack.hpp"

#include "libvideostitch/gpu_device.hpp"

namespace VideoStitch {
namespace Core {

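// Selects the default GPU backend device and maps each reader, together with
// its optional preprocessor, by reader id. `preprocs` is expected to be either
// empty or parallel to `readers`; `postproc` may be null.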
VideoPipeline::VideoPipeline(const std::vector<Input::VideoReader*>& readers,
                             const std::vector<PreProcessor*>& preprocs, PostProcessor* postproc)
    : postproc(postproc) {
  GPU::useDefaultBackendDevice();
  int i = 0;
  for (auto r : readers) {
    PreProcessor* pre = nullptr;
    if (!preprocs.empty()) {
      pre = preprocs[i++];
    }
    this->preprocs[r->id] = pre;
    this->readers[r->id] = r;
  }
}

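// Releases the per-reader GPU streams and the host-input staging buffers
// allocated in init().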
VideoPipeline::~VideoPipeline() {
  for (auto s : streams) {
    s.second.destroy();
  }
  for (auto idb : inputDeviceBuffers) {
    idb.second.release();
  }
}

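// Factory: constructs a VideoPipeline and runs init(). On failure the partially
// constructed pipeline is deleted and the error status is returned instead.
// Typical use (sketch):
//   auto pipeline = VideoPipeline::createVideoPipeline(readers, preprocs, nullptr);
//   if (!pipeline.ok()) { return pipeline.status(); }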
Potential<VideoPipeline> VideoPipeline::createVideoPipeline(const std::vector<Input::VideoReader*>& readers,
                                                            const std::vector<PreProcessor*>& preprocs,
                                                            PostProcessor* postproc) {
  VideoPipeline* ret = new VideoPipeline(readers, preprocs, postproc);

  Status initStatus = ret->init();

  if (!initStatus.ok()) {
    delete ret;
    return initStatus;
  }

  return ret;
}

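// Creates one GPU stream per reader and, for readers that deliver frames in
// host memory, allocates a device staging buffer of the reader's frame size.
// Readers that already produce device frames need no staging buffer.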
Status VideoPipeline::init() {
  for (auto r : readers) {
    auto stream = GPU::Stream::create();
    if (!stream.ok()) {
      return stream.status();
    }
    streams[r.first] = stream.value();
    switch (r.second->getSpec().addressSpace) {
      case Host: {
        // device frames in original format
        auto idb = GPU::Buffer<unsigned char>::allocate(r.second->getFrameDataSize(), "Input Frames");

        if (!idb.ok()) {
          return idb.status();
        }
        inputDeviceBuffers[r.first] = idb.value();
        break;
      }
      case Device:
        break;
    }
  }

  return Status::OK();
}

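// Extracts one frame per requested output at time `date`: each source frame is
// unpacked into the output's GPU surface and pushed to the output writer, then
// the collected surfaces are handed to the optional AlgorithmOutput callback.
// Assumes every extract's source reader has an entry in `inputBuffers`.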
Status VideoPipeline::extract(mtime_t date, FrameRate frameRate,
                              std::map<readerid_t, Input::PotentialFrame>& inputBuffers,
                              std::vector<ExtractOutput*> extracts, AlgorithmOutput* algo) {
  FAIL_RETURN(GPU::useDefaultBackendDevice());
  std::vector<std::pair<videoreaderid_t, GPU::Surface&>> frames;
  for (auto extract : extracts) {
    // TODO getStitchBuffer should return std::pair of buffer and stream
    GPU::Stream st;
    GPU::Surface& readbackSurface = extract->pimpl->acquireFrame(date, st);
    FAIL_RETURN(extraction(inputBuffers.find(extract->pimpl->getSource())->second, extract->pimpl->getSource(),
                           readbackSurface, st));
    extract->pimpl->pushVideo(date);

    std::pair<videoreaderid_t, GPU::Surface&> p((int)frames.size(), readbackSurface);
    frames.push_back(p);
  }

  if (algo != nullptr) {
    algo->onFrame(frames, date, frameRate);
  }

  return Status::OK();
}

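// Single-output variant: unpacks the frame of the extract's source reader at
// time `date` into the output surface and pushes it downstream.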
Status VideoPipeline::extract(mtime_t date, std::map<readerid_t, Input::PotentialFrame>& inputBuffers,
                              ExtractOutput* extract) {
  FAIL_RETURN(GPU::useDefaultBackendDevice());
  GPU::Stream stream;
  GPU::Surface& readbackDevBuffer = extract->pimpl->acquireFrame(date, stream);
  FAIL_RETURN(extraction(inputBuffers.find(extract->pimpl->getSource())->second, extract->pimpl->getSource(),
                         readbackDevBuffer, stream));
  extract->pimpl->pushVideo(date);

  return Status::OK();
}

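// Copies one reader frame to the GPU if needed (host frames go through the
// per-reader staging buffer, device frames are used directly) and unpacks it
// from the reader's pixel format into the readback surface, asynchronously on
// `stream`. On reader error/EOF the surface is currently left untouched (see
// the TODO below).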
Status VideoPipeline::extraction(Input::PotentialFrame inputBuffer, int source, GPU::Surface& readbackDevBuffer,
                                 GPU::Stream stream) {
  const Input::VideoReader* reader = readers[source];
  const Input::VideoReader::Spec& spec = reader->getSpec();
  GPU::Buffer<unsigned char> inputDevBuffer;
  if (inputBuffer.status.ok()) {
    switch (inputBuffer.frame.addressSpace()) {
      case Host:
        FAIL_RETURN(
            GPU::memcpyAsync(inputDeviceBuffers[source], inputBuffer.frame.hostBuffer(), spec.frameDataSize, stream));
        inputDevBuffer = inputDeviceBuffers[source];
        break;
      case Device:
        inputDevBuffer = inputBuffer.frame.deviceBuffer();
        break;
    }
    FAIL_RETURN(Image::unpackCommonPixelFormat(spec.format, readbackDevBuffer, inputDevBuffer, spec.width, spec.height,
                                               stream));
  } else {
    // error policy: black frames in case of reader error/EOF
    // XXX TODO FIXME
    // GPU::memsetToZeroAsync(readbackDevBuffer,
    //                       spec.width * spec.height * 4,
    //                       stream);
  }
  return Status::OK();
}
}  // namespace Core
}  // namespace VideoStitch