Make Tensor comparator and hash to be aware of same op and index, init checkin of the ir generation
tqchen committed Jan 9, 2017
1 parent eee0ebe commit 302c2e6
Showing 9 changed files with 311 additions and 6 deletions.
13 changes: 13 additions & 0 deletions include/tvm/operation.h
@@ -86,4 +86,17 @@ inline Tensor Compute(Array<Expr> shape,

} // namespace tvm


namespace std {
template <>
struct hash<::tvm::Tensor> {
  std::size_t operator()(const ::tvm::Tensor& k) const {
    if (k.defined() && k->op.defined()) {
      return k->op.hash();
    } else {
      return k.hash();
    }
  }
};
}  // namespace std
#endif // TVM_OPERATION_H_
17 changes: 17 additions & 0 deletions include/tvm/tensor.h
@@ -47,6 +47,12 @@ class Tensor : public FunctionRef {
   * \return the pointer to the internal node container
   */
  inline const TensorNode* operator->() const;
  /*!
   * \brief Check whether two tensors are equal to each other.
   * \param other The other tensor to be compared.
   * \return Whether the two tensors are equal.
   */
  inline bool operator==(const Tensor& other) const;
  /*! \return The dimension of the tensor */
  inline size_t ndim() const;
/*!
@@ -201,6 +207,17 @@ inline size_t Tensor::ndim() const {
  return (*this)->shape.size();
}

inline bool Tensor::operator==(const Tensor& other) const {
  if (get() == other.get()) return true;
  if (get() == nullptr || other.get() == nullptr) return false;
  if ((*this)->op.defined() || other->op.defined()) {
    return (*this)->op == other->op &&
        (*this)->value_index == other->value_index;
  } else {
    return false;
  }
}
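Taken together with the std::hash specialization added in operation.h, this means two distinct Tensor handles that name the same output of the same operation behave as a single key. A minimal sketch of the intended behavior (assuming the inline Compute overloads declared in operation.h and dmlc-style CHECK macros; illustrative, not part of this diff):

#include <unordered_set>
#include <tvm/operation.h>
#include <tvm/tensor.h>

void TensorIdentityExample() {
  using namespace tvm;
  Var m("m");
  // One compute op; 'a' and 'b' are separate handles to its 0-th output.
  Tensor a = Compute({m}, [](Var i) { return i + 1; }, "a");
  Tensor b = a->op.output(0);
  CHECK(a == b);  // equal: same op and same value_index
  std::unordered_set<Tensor> seen;
  seen.insert(a);
  seen.insert(b);
  CHECK_EQ(seen.size(), 1U);  // std::hash<Tensor> is consistent with operator==
}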

// macro to turn every operation of slice to expression
#define DEFINE_OVERLOAD_SLICE_UNARY_OP(Op) \
inline Expr operator Op (const Tensor::Slice& a) { \
5 changes: 4 additions & 1 deletion python/tvm/_ctypes/_api.py
@@ -29,9 +29,12 @@ class ArgVariant(ctypes.Union):
def _type_key(handle):
    ret_val = ArgVariant()
    ret_typeid = ctypes.c_int()
    ret_success = ctypes.c_int()
    check_call(_LIB.TVMNodeGetAttr(
        handle, c_str("type_key"),
        ctypes.byref(ret_val),
        ctypes.byref(ret_typeid),
        ctypes.byref(ret_success)))
    return py_str(ret_val.v_str)

NODE_TYPE = {
30 changes: 30 additions & 0 deletions python/tvm/tensor.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import as _abs
from ._ctypes._api import NodeBase, SliceBase, register_node, convert
from . import collections as _collections
from . import _function_internal
from . import make as _make
from . import expr as _expr

@@ -38,6 +39,35 @@ def __call__(self, *indices):
    def __getitem__(self, indices):
        return TensorSlice(self, indices)

    def __hash__(self):
        return _function_internal._TensorHash(self)

    def __eq__(self, other):
        if not isinstance(other, Tensor):
            return False
        return _function_internal._TensorEqual(self, other)

    @property
    def ndim(self):
        return len(self.shape)


class Operation(NodeBase):
    def output(self, index):
        """Get the index-th output of the operation.

        Parameters
        ----------
        index : int
            The output index.

        Returns
        -------
        out : Tensor
            The index-th output.
        """
        return _function_internal._OpGetOutput(self, index)


@register_node
class ComputeOp(Operation):
    pass
17 changes: 17 additions & 0 deletions src/c_api/c_api_lang.cc
@@ -149,13 +149,30 @@ TVM_REGISTER_API(_Tensor)
                            args.at(4));
  });

TVM_REGISTER_API(_TensorEqual)
.set_body([](const ArgStack& args, RetValue *ret) {
    *ret = args.at(0).operator Tensor() == args.at(1).operator Tensor();
  });

TVM_REGISTER_API(_TensorHash)
.set_body([](const ArgStack& args, RetValue *ret) {
    *ret = static_cast<int64_t>(
        std::hash<Tensor>()(args.at(0).operator Tensor()));
  });

TVM_REGISTER_API(_ComputeOp)
.set_body([](const ArgStack& args, RetValue *ret) {
    *ret = ComputeOpNode::make(args.at(0),
                               args.at(1),
                               args.at(2));
  });

TVM_REGISTER_API(_OpGetOutput)
.set_body([](const ArgStack& args, RetValue *ret) {
    *ret = args.at(0).operator Operation().output(
        args.at(1).operator size_t());
  });
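Each binding above follows the same ArgStack/RetValue shape: unpack positional arguments with args.at(i) plus an explicit operator cast, then assign the result to *ret. As a sketch of how a further accessor could be bound in the same style (a hypothetical _TensorNDim helper, not part of this commit):

TVM_REGISTER_API(_TensorNDim)
.set_body([](const ArgStack& args, RetValue *ret) {
    // hypothetical: expose Tensor::ndim() to the frontend as an int64
    *ret = static_cast<int64_t>(args.at(0).operator Tensor().ndim());
  });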


TVM_REGISTER_API(_IterVar)
.set_body([](const ArgStack& args, RetValue *ret) {
214 changes: 214 additions & 0 deletions src/pass/schedule_ops.cc
@@ -5,11 +5,225 @@
#include <tvm/ir.h>
#include <tvm/ir_mutator.h>
#include <tvm/ir_pass.h>
#include <tvm/ir_visitor.h>
#include "./scope.h"

namespace tvm {
namespace ir {
namespace {

/*!
 * \brief Use message passing to calculate the assignment of each Var inside the loop body.
 * \param s The schedule to be used.
 * \param dom_map The domain map of each iteration variable's domain.
 * \param p_state The message passing state, mapping each IterVar to its assignment.
 */
void PassUpOffset(const Schedule& s,
                  const std::unordered_map<IterVar, Range>& dom_map,
                  std::unordered_map<IterVar, Expr>* p_state) {
  auto& state = *p_state;
  for (size_t i = s->relations.size(); i != 0; --i) {
    IterVarRelation rel = s->relations[i - 1];
    if (rel.as<SplitNode>()) {
      const SplitNode* s = rel.as<SplitNode>();
      Expr outer = state.at(s->outer);
      Expr inner = state.at(s->inner);
      Expr factor = dom_map.at(s->inner)->extent;
      Expr offset = inner + outer * factor;
      Expr outer_min = dom_map.at(s->parent)->min;
      if (!is_zero(outer_min)) {
        offset = outer_min + offset;
      }
      state[s->parent] = offset;
    } else if (rel.as<FuseNode>()) {
      const FuseNode* s = rel.as<FuseNode>();
      Expr value = state.at(s->fused);
      Expr factor = dom_map.at(s->inner)->extent;
      state[s->outer] = value / factor;
      state[s->inner] = value % factor;
    } else {
      LOG(FATAL) << "unknown relation type";
    }
  }
}
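Concretely, a split introduces parent = outer * factor + inner, where factor is the extent of the inner variable, and a fuse inverts the same arithmetic with division and modulo. A plain-integer sketch of the two directions (hypothetical values, no IR involved):

// Plain-integer sketch of the split/fuse index arithmetic used above.
// 'factor' is the extent of the inner iteration variable.
int SplitParentIndex(int outer, int inner, int factor) {
  return outer * factor + inner;   // state[s->parent]
}
void FuseChildIndices(int fused, int factor, int* outer, int* inner) {
  *outer = fused / factor;         // state[s->outer]
  *inner = fused % factor;         // state[s->inner]
}
// SplitParentIndex(5, 3, 8) == 43, and FuseChildIndices(43, 8, ...)
// recovers outer == 5, inner == 3.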

/*!
 * \brief Split the expression by addition.
 * \param expr The expression to be split.
 * \param loop_level The loop level of each variable.
 * \param result A vector of (level, expr) pairs.
 *  The level gives the minimum loop level at which this expression needs to be computed.
 *  The Expr gives the expression content.
 */
void SplitByAdd(Expr expr,
                const std::unordered_map<const Variable*, size_t>& loop_level,
                std::vector<std::pair<size_t, Expr> > *result) {
  const Add* op = expr.as<Add>();
  if (op != nullptr) {
    SplitByAdd(op->a, loop_level, result);
    SplitByAdd(op->b, loop_level, result);
  } else {
    size_t max_level = 0;
    auto fvisit = [&max_level, &loop_level](const NodeRef& n) {
      const Variable* op = n.as<Variable>();
      if (op != nullptr) {
        auto it = loop_level.find(op);
        if (it != loop_level.end()) {
          max_level = std::max(max_level, it->second);
        }
      }
    };
    PostOrderVisit(expr, fvisit);
    result->push_back(std::make_pair(max_level, expr));
  }
}
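For example, with i bound at loop level 1 and j at level 2, the expression i * 8 + j + 4 splits into the terms (1, i * 8), (2, j) and (0, 4), so each term can later be bound at the outermost loop that defines all of its variables. A self-contained sketch of the same flattening on a toy tree (hypothetical types, independent of the TVM IR):

#include <cstddef>
#include <utility>
#include <vector>

// Toy expression: either an addition of two sub-expressions, or a leaf
// whose deepest referenced loop level is precomputed (0 for constants).
struct ToyExpr {
  bool is_add = false;
  std::size_t level = 0;       // valid when !is_add
  const ToyExpr* a = nullptr;  // valid when is_add
  const ToyExpr* b = nullptr;  // valid when is_add
};

// Flatten the addition tree, tagging each leaf term with its level,
// mirroring the recursion in SplitByAdd above.
void SplitByAddToy(const ToyExpr& e,
                   std::vector<std::pair<std::size_t, const ToyExpr*> >* out) {
  if (e.is_add) {
    SplitByAddToy(*e.a, out);
    SplitByAddToy(*e.b, out);
  } else {
    out->push_back(std::make_pair(e.level, &e));
  }
}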

/*!
 * \brief Combine the nest of statements whose bodies are not yet defined.
 * \param nest A list of For, Let and Attr statements, each with an undefined body.
 * \param body The innermost body to fill in.
 */
Stmt CombineNest(std::vector<Stmt>&& nest, Stmt body) {
  while (!nest.empty()) {
    Stmt s = std::move(nest.back());
    nest.pop_back();
    if (s.as<For>()) {
      auto n = std::make_shared<For>(*s.as<For>());
      n->body = body;
      body = Stmt(n);
    } else if (s.as<LetStmt>()) {
      auto n = std::make_shared<LetStmt>(*s.as<LetStmt>());
      n->body = body;
      body = Stmt(n);
    } else if (s.as<AttrStmt>()) {
      auto n = std::make_shared<AttrStmt>(*s.as<AttrStmt>());
      n->body = body;
      body = Stmt(n);
    } else {
      LOG(FATAL) << "not supported nest type";
    }
  }
  return body;
}
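The effect is a right fold: given nest = [for i, for j, let k] and a final body b, the result is for i { for j { let k { b } } }. A minimal string-based sketch of the same fold (illustrative only):

#include <string>
#include <vector>

// Sketch: wrap a body in a nest of opening constructs, innermost last,
// mirroring the back-to-front fold in CombineNest above.
std::string CombineNestToy(std::vector<std::string> nest, std::string body) {
  while (!nest.empty()) {
    body = nest.back() + " { " + body + " }";
    nest.pop_back();
  }
  return body;
}
// CombineNestToy({"for i", "for j"}, "C[i] = A[i]")
//   == "for i { for j { C[i] = A[i] } }"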

/*!
 * \brief Make the loop nest of the corresponding schedule.
 * \param sch The schedule.
 * \param dom_map The domain map.
 */
std::vector<Stmt> MakeLoopNest(
    const Schedule& sch,
    const std::unordered_map<IterVar, Range>& dom_map) {
  // optional, use let to define some CSE in dom_map.
  auto leaf_iter_vars = sch->leaf_iter_vars;
  std::unordered_map<IterVar, Expr> offset;
  std::unordered_map<const Variable*, size_t> loop_level;

  // create the loop nest
  std::vector<Stmt> nest;
  nest.resize(leaf_iter_vars.size() + 1, Stmt());

  for (size_t i = 0; i < leaf_iter_vars.size(); ++i) {
    auto iv = leaf_iter_vars[i];
    // initialize the offset and loop_level
    offset[iv] = iv->var;
    loop_level[iv->var.as<Variable>()] = i + 1;

    nest[i] = AttrStmt::make(iv->var, "scope", iv, Stmt());
    if (iv->thread_tag.length() == 0) {
      Range dom = dom_map.at(iv);
      nest[i] = For::make(iv->var, dom->min, dom->extent,
                          ForType::Serial, DeviceAPI::None, nest[i]);
    }
  }
  // message passing to get offset of root iter vars.
  PassUpOffset(sch, dom_map, &offset);
  for (IterVar iv : sch->op->root_iter_vars()) {
    Expr value = offset.at(iv);
    if (value.same_as(iv->var)) continue;
    using Entry = std::pair<size_t, Expr>;
    std::vector<Entry> splits;
    SplitByAdd(value, loop_level, &splits);

    Expr offset = 0;
    for (size_t i = 0; i <= leaf_iter_vars.size(); ++i) {
      // accumulate the split terms that become available at loop level i
      for (const auto& kv : splits) {
        if (kv.first == i) {
          offset = offset + kv.second;
        }
      }
      std::ostringstream os;
      os << iv->var->name_hint << ".at.l" << i;
      Var base_offset(os.str());
      nest[i] = LetStmt::make(base_offset, offset, nest[i]);
      offset = base_offset;
    }
    nest.back() = LetStmt::make(iv->var, offset, nest.back());
  }
  return nest;
}
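For a schedule that splits a single root variable x into x.outer and x.inner with factor 8, the combined nest comes out roughly as follows (a hand-written sketch of the generated IR, not output of this commit):

// for (x.outer, 0, 8)
//   for (x.inner, 0, 8)
//     let x = x.outer * 8 + x.inner
//     <operation body>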

/*!
 * \brief Make the body statement of the operation.
 * \param op The operation.
 */
Stmt MakeBody(const Operation& op) {
  Stmt body;
  if (op.as<ComputeOpNode>()) {
    const ComputeOpNode* compute = op.as<ComputeOpNode>();
    // Note: a Tensor's address cannot uniquely identify it;
    // the Provide target is identified by op and value_index instead.
    Tensor t = op.output(0);
    Array<Expr> args;
    for (IterVar iv : compute->axis) {
      args.push_back(iv->var);
    }
    body = Provide::make(t, {compute->body}, args);
  } else {
    LOG(FATAL) << "not supported op";
  }
  return body;
}

Stmt MakePipeline(const Schedule& sch, Stmt body) {
  // placeholder: pipeline construction is not implemented in this checkin
  return body;
}

// inject the operator's realization on the stmt.
class InjectRealize : public IRMutator {
 public:
  explicit InjectRealize(Schedule sch)
      : sch_(sch) {}

  Stmt Mutate(Stmt stmt) final {
    const AttrStmt* op = stmt.as<AttrStmt>();
    if (op != nullptr) {
      attr_scope_.Push({op->node, op->type_key}, op->value);
      stmt = IRMutator::Mutate(stmt);
      attr_scope_.Pop({op->node, op->type_key});
    } else {
      stmt = IRMutator::Mutate(stmt);
    }

    if (op != nullptr &&
        op->type_key == "scope" &&
        op->node == sch_->attach_parent) {
      return AttrStmt::make(
          op->node, op->type_key, op->value,
          MakePipeline(sch_, op->body));
    } else {
      return stmt;
    }
  }

 private:
  // the schedule whose realization is to be injected
  Schedule sch_;
  Scope<AttrKey, Expr> attr_scope_;
};
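A sketch of how the mutator would be driven once the pass is wired up (hypothetical entry point, not part of this commit):

// Hypothetical driver: rewrite the attach point of sch inside stmt.
Stmt InjectPipelineAt(const Schedule& sch, Stmt stmt) {
  return InjectRealize(sch).Mutate(stmt);
}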


} // namespace
} // namespace ir
} // namespace tvm
6 changes: 5 additions & 1 deletion src/schedule/bound.cc
@@ -101,7 +101,11 @@ void PassToOperation(
    const Tensor& tensor,
    const std::vector<IntSet>& dim_bounds,
    std::unordered_map<IterVar, std::vector<IntSet> >* result) {
  // This is a push style operation: given the output bound, push it to the op's IterVar bounds.
  // It cannot handle complicated cases where an op's bound is coupled with the bounds of
  // all of its outputs without a simple commutative union relation.
  //
  // Eventually, we need to change the inference to be a pull style inference.
  if (tensor->op.as<ComputeOpNode>()) {
    auto root_iter_vars = tensor->op->root_iter_vars();
    CHECK_EQ(tensor.ndim(), root_iter_vars.size());
