Add formulas and basic tests (#49098)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/49098
RFC: https://github.com/pytorch/rfcs/pull/11
This PR adds:
- Codegen support to define forward grad formulas, along with a few manual formulas
- Codegen support to automatically generate formulas, along with a few usages of it
- Tests for basic forward grad components (see the Python sketch after this list)
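For context, here is a minimal sketch of what such a test exercises at the Python level. It is written against the current `torch.autograd.forward_ad` API (`dual_level`, `make_dual`, `unpack_dual`); the exact Python-facing entry points at the time of this PR may differ, so treat it as illustrative rather than as the tests added here.
```python
import torch
import torch.autograd.forward_ad as fwAD

primal = torch.randn(3)
tangent = torch.randn(3)

with fwAD.dual_level():
    # Attach a tangent (forward grad) to the primal at the current level.
    dual = fwAD.make_dual(primal, tangent)
    out = dual * 2
    # The tangent is propagated alongside the primal computation.
    out_primal, out_tangent = fwAD.unpack_dual(out)
    assert torch.allclose(out_primal, primal * 2)
    assert torch.allclose(out_tangent, tangent * 2)
```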
Examples of the generated code are shown below. For each of them, the only part that changes is the `if` statement just before the return that checks whether a forward grad is defined.
- For a manual entry:
```yaml
- name: max(Tensor self) -> Tensor
self: evenly_distribute_backward(grad, self, result)
result: max_forward(self_fw_grad, self, result)
```
```cpp
Tensor max(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
auto _any_requires_grad = compute_requires_grad( self );
std::shared_ptr<MaxBackward1> grad_fn;
if (_any_requires_grad) {
grad_fn = std::shared_ptr<MaxBackward1>(new MaxBackward1(), deleteNode);
grad_fn->set_next_edges(collect_next_edges( self ));
grad_fn->self_ = SavedVariable(self, false);
}
#ifndef NDEBUG
c10::optional<Storage> self__storage_saved =
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
c10::intrusive_ptr<TensorImpl> self__impl_saved;
if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
#endif
auto tmp = ([&]() {
at::AutoNonVariableTypeMode non_var_type_mode(true);
return at::max(self_);
})();
auto result = std::move(tmp);
#ifndef NDEBUG
if (self__storage_saved.has_value())
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
#endif
if (grad_fn) {
set_history(flatten_tensor_args( result ), grad_fn);
}
throw_error_for_complex_autograd(result, "max");
if (isFwGradDefined(self)) {
auto self_fw_grad = toLegacyFwGrad(self);
auto self_primal = toLegacyPrimal(self);
auto result_new_fw_grad = max_forward(self_fw_grad, self_primal, result);
if (result_new_fw_grad.defined()) {
result.set_fw_grad(result_new_fw_grad, /* level */ 0, /* is_inplace_op */ false);
}
}
if (grad_fn) {
grad_fn->result_ = SavedVariable(result, true);
}
return result;
}
```
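As a rough illustration of what the manual forward formula for `max` computes, the sketch below checks that, for a unique maximum, the output tangent is the input tangent at the argmax. It assumes the current `torch.autograd.forward_ad` API and that `torch.max` has the forward formula added here; it is not part of this PR's test suite.
```python
import torch
import torch.autograd.forward_ad as fwAD

primal = torch.tensor([1.0, 5.0, 3.0])   # unique maximum at index 1
tangent = torch.tensor([0.1, 0.2, 0.3])

with fwAD.dual_level():
    dual = fwAD.make_dual(primal, tangent)
    out = torch.max(dual)
    # With a unique max, the JVP of max is the tangent at the argmax.
    assert torch.allclose(fwAD.unpack_dual(out).tangent, tangent[primal.argmax()])
```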
- For an element-wise entry:
```yaml
- name: abs(Tensor self) -> Tensor
self: grad * self.sgn()
result: auto_element_wise
```
```cpp
Tensor abs(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
auto _any_requires_grad = compute_requires_grad( self );
std::shared_ptr<AbsBackward> grad_fn;
if (_any_requires_grad) {
grad_fn = std::shared_ptr<AbsBackward>(new AbsBackward(), deleteNode);
grad_fn->set_next_edges(collect_next_edges( self ));
grad_fn->self_ = SavedVariable(self, false);
}
#ifndef NDEBUG
c10::optional<Storage> self__storage_saved =
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
c10::intrusive_ptr<TensorImpl> self__impl_saved;
if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
#endif
auto tmp = ([&]() {
at::AutoNonVariableTypeMode non_var_type_mode(true);
return at::abs(self_);
})();
auto result = std::move(tmp);
#ifndef NDEBUG
if (self__storage_saved.has_value())
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
#endif
if (grad_fn) {
set_history(flatten_tensor_args( result ), grad_fn);
}
throw_error_for_complex_autograd(result, "abs");
if (isFwGradDefined(self)) {
auto self_fw_grad = toLegacyFwGrad(self);
auto self_primal = toLegacyPrimal(self);
auto result_new_fw_grad = self_fw_grad * self_primal.sgn();
if (result_new_fw_grad.defined()) {
result.set_fw_grad(result_new_fw_grad, /* level */ 0, /* is_inplace_op */ false);
}
}
return result;
}
```
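`auto_element_wise` reuses the backward formula, substituting the incoming `grad` with the input's forward grad and `self` with the primal, which is valid for element-wise ops. A minimal sketch of the resulting behavior, again assuming the current `forward_ad` Python API:
```python
import torch
import torch.autograd.forward_ad as fwAD

primal = torch.tensor([-2.0, 0.5, 3.0])
tangent = torch.tensor([1.0, -1.0, 0.25])

with fwAD.dual_level():
    dual = fwAD.make_dual(primal, tangent)
    out = torch.abs(dual)
    # Same expression as the backward formula, with grad -> tangent:
    # d(abs(x)) = sgn(x) * dx  (away from zero)
    expected = tangent * primal.sgn()
    assert torch.allclose(fwAD.unpack_dual(out).tangent, expected)
```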
- For a linear entry:
```yaml
- name: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
self: grad
result: auto_linear
```
```cpp
Tensor clone(const Tensor & self, c10::optional<MemoryFormat> memory_format) {
auto& self_ = unpack(self, "self", 0);
auto _any_requires_grad = compute_requires_grad( self );
std::shared_ptr<CloneBackward> grad_fn;
if (_any_requires_grad) {
grad_fn = std::shared_ptr<CloneBackward>(new CloneBackward(), deleteNode);
grad_fn->set_next_edges(collect_next_edges( self ));
}
#ifndef NDEBUG
c10::optional<Storage> self__storage_saved =
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
c10::intrusive_ptr<TensorImpl> self__impl_saved;
if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
#endif
auto tmp = ([&]() {
at::AutoNonVariableTypeMode non_var_type_mode(true);
return at::clone(self_, memory_format);
})();
auto result = std::move(tmp);
#ifndef NDEBUG
if (self__storage_saved.has_value())
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
#endif
if (grad_fn) {
set_history(flatten_tensor_args( result ), grad_fn);
}
if (isFwGradDefined(self)) {
auto self_fw_grad = toLegacyFwGrad(self);
auto result_new_fw_grad = at::clone(self_fw_grad, memory_format);
if (result_new_fw_grad.defined()) {
result.set_fw_grad(result_new_fw_grad, /* level */ 0, /* is_inplace_op */ false);
}
}
return result;
}
```
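`auto_linear` computes the forward grad by applying the op itself to the input's forward grad, which is valid because the op is linear in its differentiable input (here, `at::clone(self_fw_grad, memory_format)`). A sketch of the observable behavior, under the same API assumptions as above:
```python
import torch
import torch.autograd.forward_ad as fwAD

primal = torch.randn(2, 3)
tangent = torch.randn(2, 3)

with fwAD.dual_level():
    dual = fwAD.make_dual(primal, tangent)
    out = dual.clone()
    # For a linear op, the output tangent is the op applied to the input tangent.
    assert torch.allclose(fwAD.unpack_dual(out).tangent, tangent)
```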
- For an entry with no forward formula:
```yaml
- name: angle(Tensor self) -> Tensor
self: angle_backward(grad, self)
```
```cpp
Tensor angle(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
auto _any_requires_grad = compute_requires_grad( self );
std::shared_ptr<AngleBackward> grad_fn;
if (_any_requires_grad) {
grad_fn = std::shared_ptr<AngleBackward>(new AngleBackward(), deleteNode);
grad_fn->set_next_edges(collect_next_edges( self ));
grad_fn->self_ = SavedVariable(self, false);
}
#ifndef NDEBUG
c10::optional<Storage> self__storage_saved =
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
c10::intrusive_ptr<TensorImpl> self__impl_saved;
if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
#endif
auto tmp = ([&]() {
at::AutoNonVariableTypeMode non_var_type_mode(true);
return at::angle(self_);
})();
auto result = std::move(tmp);
#ifndef NDEBUG
if (self__storage_saved.has_value())
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
#endif
if (grad_fn) {
set_history(flatten_tensor_args( result ), grad_fn);
}
throw_error_for_complex_autograd(result, "angle");
TORCH_CHECK(!(isFwGradDefined(self)), "Trying to use forward prop with angle that does not support it.");
return result;
}
```
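When no forward formula is given, the generated `TORCH_CHECK` makes the op raise if it receives a tensor carrying a forward grad. Roughly, at the user level (hypothetical snippet using the current `forward_ad` API; `angle` is only an example of an op without a forward formula at the time of this PR and may have gained one since, in which case no error is raised):
```python
import torch
import torch.autograd.forward_ad as fwAD

primal = torch.randn(3)
tangent = torch.randn(3)

with fwAD.dual_level():
    dual = fwAD.make_dual(primal, tangent)
    try:
        torch.angle(dual)  # no forward formula -> the generated TORCH_CHECK fires
    except RuntimeError as e:
        print(e)  # "Trying to use forward prop with angle that does not support it."
```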
Test Plan: Imported from OSS
Reviewed By: ezyang
Differential Revision: D25607505
Pulled By: albanD
fbshipit-source-id: fe2315d587689af1cd5968536fa26c680b8b8829