
Commit

Merge pull request #9 from zhiics/coalesce-storage
some fixes
jroesch authored Apr 22, 2020
2 parents e4b1391 + 439d59d commit 36da8f2
Showing 4 changed files with 6 additions and 8 deletions.
1 change: 1 addition & 0 deletions include/tvm/runtime/vm.h
@@ -270,6 +270,7 @@ struct Instruction {
   /*!
    * \brief Construct an allocate tensor instruction with constant shape.
    * \param storage The storage to allocate out of.
+   * \param offset The offset into the storage to allocate from.
    * \param shape The shape of the tensor.
    * \param dtype The dtype of the tensor.
    * \param dst The destination register.
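
The new `offset` parameter is what lets the memory planner place several tensors inside one coalesced storage block. As a rough standalone illustration (plain C++, not TVM code; the buffer sizes and offsets are made up), two non-overlapping tensor views can be carved out of a single backing allocation:

// Minimal standalone sketch (not TVM API): two tensors carved out of one
// coalesced storage block at different offsets, which is what the new
// `offset` parameter to AllocTensor makes expressible.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // One backing allocation, as a memory planner might coalesce it.
  std::vector<uint8_t> storage(4 * sizeof(float) + 8 * sizeof(float));

  // Tensor A: 4 floats at offset 0; tensor B: 8 floats right after it.
  float* a = reinterpret_cast<float*>(storage.data());
  float* b = reinterpret_cast<float*>(storage.data() + 4 * sizeof(float));

  for (int i = 0; i < 4; ++i) a[i] = 1.0f;
  for (int i = 0; i < 8; ++i) b[i] = 2.0f;

  // The two views never alias because their (offset, size) ranges are disjoint.
  std::cout << a[3] << " " << b[0] << "\n";  // prints: 1 2
  return 0;
}
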
2 changes: 1 addition & 1 deletion python/tvm/relay/transform/memory_alloc.py
@@ -174,7 +174,7 @@ def dynamic_invoke(self, scope, func, ins, new_args, out_types, ret_type):
                 size = self.compute_storage_in_relay(
                     out_shape, out_type.dtype)
                 alignment = self.compute_alignment(out_type.dtype)
-                sto = scope.let("storage_{i}".format(i=i), self.alloc_storage(
+                sto = scope.let("storage_{i}".format(i=i), alloc_storage(
                     size, alignment, self.default_context, out_type.dtype))
                 storages.append(sto)

5 changes: 1 addition & 4 deletions src/relay/backend/vm/compiler.cc
@@ -906,8 +906,6 @@ transform::Sequential MemoryOpt(tvm::Target host_target) {
 
   // Perform memory planning in order to coalesce/reduce allocations.
   pass_seqs.push_back(transform::MemoryPlan());
-  // Compute away possibly introduced constant computation.
-  pass_seqs.push_back(transform::FoldConstant());
 
   return transform::Sequential(pass_seqs);
 }
@@ -965,8 +963,7 @@ IRModule VMCompiler::OptimizeModule(const IRModule& mod, const TargetsMap& targe
   pass_seqs.push_back(transform::LambdaLift());
   pass_seqs.push_back(transform::InlinePrimitives());
 
-
-
+  // Memory optimization
   pass_seqs.push_back(MemoryOpt(this->target_host_));
 
   transform::Sequential seq(pass_seqs);
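
MemoryOpt builds its pipeline by pushing passes into a vector and wrapping them in transform::Sequential, which runs them in order over the module. A minimal standalone sketch of that pattern (plain C++; `Module`, `Pass`, and `RunSequential` are stand-ins, not TVM types):

// Standalone sketch (not TVM's transform::Sequential): the pattern of
// composing passes into one pipeline, as MemoryOpt does with pass_seqs.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

using Module = std::string;                        // stand-in for an IRModule
using Pass = std::function<Module(const Module&)>;

// Apply the passes in order, each consuming the previous result.
Module RunSequential(const std::vector<Pass>& passes, Module mod) {
  for (const auto& pass : passes) mod = pass(mod);
  return mod;
}

int main() {
  std::vector<Pass> pass_seqs;
  pass_seqs.push_back([](const Module& m) { return m + " -> MemoryPlan"; });
  pass_seqs.push_back([](const Module& m) { return m + " -> MemoryOpt"; });
  std::cout << RunSequential(pass_seqs, "mod") << "\n";
  return 0;
}
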
6 changes: 3 additions & 3 deletions src/runtime/vm/executable.cc
@@ -314,7 +314,7 @@ VMInstructionSerializer SerializeInstruction(const Instruction& instr) {
       break;
     }
     case Opcode::AllocTensor: {
-      // Number of fields = 6 + instr.alloc_tensor.ndim
+      // Number of fields = 7 + instr.alloc_tensor.ndim
       fields.push_back(instr.alloc_tensor.storage);
       fields.push_back(instr.alloc_tensor.offset);
       // Save `DLDataType` and the dst register.
@@ -565,7 +565,7 @@ Instruction DeserializeInstruction(const VMInstructionSerializer& instr) {
       return Instruction::InvokePacked(packed_index, arity, output_size, args);
     }
     case Opcode::AllocTensor: {
-      // Number of fields = 6 + instr.alloc_tensor.ndim
+      // Number of fields = 7 + instr.alloc_tensor.ndim
       DCHECK_GE(instr.fields.size(), 7U);
       DCHECK_EQ(instr.fields.size(), 7U + static_cast<size_t>(instr.fields[4]));
 
@@ -580,7 +580,7 @@ Instruction DeserializeInstruction(const VMInstructionSerializer& instr) {
       Index ndim = instr.fields[5];
       RegName dst = instr.fields[6];
 
-      std::vector<Index> shape = ExtractFields(instr.fields, 6, ndim);
+      std::vector<Index> shape = ExtractFields(instr.fields, 7, ndim);
 
       return Instruction::AllocTensor(storage_reg, offset, shape, dtype, dst);
     }
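
Taken together, the serializer and deserializer changes imply this field layout for AllocTensor: index 0 is the storage register, 1 the new offset, 2–4 the DLDataType triple (code, bits, lanes — assumed from the "Save `DLDataType`" comment), 5 the ndim, 6 the dst register, and the shape occupies indices 7 through 7+ndim, for 7 + ndim fields total. A standalone round-trip sketch under those assumptions (plain C++, not TVM's VMInstructionSerializer):

// Standalone sketch of the AllocTensor field layout implied by this diff:
// [0]=storage, [1]=offset, [2..4]=DLDataType(code,bits,lanes) (assumed),
// [5]=ndim, [6]=dst, [7..7+ndim)=shape — 7 + ndim fields in total.
#include <cassert>
#include <cstdint>
#include <vector>

using Index = int64_t;

std::vector<Index> Serialize(Index storage, Index offset, Index code,
                             Index bits, Index lanes, Index dst,
                             const std::vector<Index>& shape) {
  std::vector<Index> fields = {storage, offset, code, bits, lanes,
                               static_cast<Index>(shape.size()), dst};
  fields.insert(fields.end(), shape.begin(), shape.end());
  return fields;
}

int main() {
  std::vector<Index> fields = Serialize(/*storage=*/3, /*offset=*/128,
                                        /*code=*/2, /*bits=*/32, /*lanes=*/1,
                                        /*dst=*/7, /*shape=*/{1, 224, 224});
  assert(fields.size() == 7u + 3u);
  Index ndim = fields[5];
  // Shape now starts at index 7; before `offset` was added it started at 6.
  std::vector<Index> shape(fields.begin() + 7, fields.begin() + 7 + ndim);
  assert(shape[1] == 224);
  return 0;
}
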
