id stringlengths 27 29 | content stringlengths 226 3.24k |
|---|---|
codereview_new_cpp_data_9325 | set_attribute_error_context(PyObject* v, PyObject* name)
}
// Intercept AttributeError exceptions and augment them to offer suggestions later.
PyObject *exc = PyErr_GetRaisedException();
- // Check if the exception is indeed an AttributeError
if (!PyErr_GivenExceptionMatches(exc, PyExc_AttributeError)) {
goto restore;
}
This comment seems redundant. What else could `PyErr_GivenExceptionMatches(exc, PyExc_AttributeError)` do?
set_attribute_error_context(PyObject* v, PyObject* name)
}
// Intercept AttributeError exceptions and augment them to offer suggestions later.
PyObject *exc = PyErr_GetRaisedException();
if (!PyErr_GivenExceptionMatches(exc, PyExc_AttributeError)) {
goto restore;
} |
codereview_new_cpp_data_9329 | py_get_system_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise)
info->monotonic = 0;
info->adjustable = 1;
if (clock_getres(CLOCK_REALTIME, &res) == 0) {
info->resolution = (double)res.tv_sec + (double)res.tv_nsec * 1e-9;
}
else {
It is not obvious why implicit conversion cannot be relied on here. Can you add a short comment explaining the need for explicit casts?
py_get_system_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise)
info->monotonic = 0;
info->adjustable = 1;
if (clock_getres(CLOCK_REALTIME, &res) == 0) {
+ /* the explicit (double) casts silence loss-of-precision warnings
+ on some platforms */
info->resolution = (double)res.tv_sec + (double)res.tv_nsec * 1e-9;
}
else { |
codereview_new_cpp_data_9330 | random_seed_time_pid(RandomObject *self)
key[0] = (uint32_t)(now & 0xffffffffU);
key[1] = (uint32_t)(now >> 32);
-#ifdef HAVE_GETPID
- key[2] = (uint32_t)getpid();
-#elif defined(MS_WINDOWS_NON_DESKTOP)
key[2] = (uint32_t)GetCurrentProcessId();
#else
key[2] = 0;
#endif
This case will need to be ordered above `HAVE_GETPID` with the changes mentioned in other comments. There's no problem with that.
random_seed_time_pid(RandomObject *self)
key[0] = (uint32_t)(now & 0xffffffffU);
key[1] = (uint32_t)(now >> 32);
+#ifdef MS_WINDOWS_NON_DESKTOP
key[2] = (uint32_t)GetCurrentProcessId();
+#elif defined(HAVE_GETPID)
+ key[2] = (uint32_t)getpid();
#else
key[2] = 0;
#endif |
codereview_new_cpp_data_9332 | instructions_to_cfg(PyObject *instructions, cfg_builder *g)
}
}
- for (Py_ssize_t i = 0; i < num_insts; i++) {
if (is_target[i]) {
jump_target_label lbl = {i};
RETURN_IF_ERROR(cfg_builder_use_label(g, lbl));
```suggestion
for (int i = 0; i < num_insts; i++) {
```
instructions_to_cfg(PyObject *instructions, cfg_builder *g)
}
}
+ for (int i = 0; i < num_insts; i++) {
if (is_target[i]) {
jump_target_label lbl = {i};
RETURN_IF_ERROR(cfg_builder_use_label(g, lbl)); |
codereview_new_cpp_data_9333 | instructions_to_cfg(PyObject *instructions, cfg_builder *g)
assert(PyList_Check(instructions));
Py_ssize_t num_insts = PyList_GET_SIZE(instructions);
- bool *is_target = PyMem_Malloc(num_insts * sizeof(bool));
- for (Py_ssize_t i = 0; i < num_insts; i++) {
- is_target[i] = false;
}
for (Py_ssize_t i = 0; i < num_insts; i++) {
PyObject *item = PyList_GET_ITEM(instructions, i);
These four lines (9708-9711), zeroing included, can be replaced with:
```c
bool *is_target = PyMem_Calloc(num_insts, sizeof(bool));
```
instructions_to_cfg(PyObject *instructions, cfg_builder *g)
assert(PyList_Check(instructions));
Py_ssize_t num_insts = PyList_GET_SIZE(instructions);
+ bool *is_target = PyMem_Calloc(num_insts, sizeof(bool));
+ if (is_target == NULL) {
+ return ERROR;
}
for (Py_ssize_t i = 0; i < num_insts; i++) {
PyObject *item = PyList_GET_ITEM(instructions, i); |
codereview_new_cpp_data_9335 | positional_only_passed_as_keyword(PyThreadState *tstate, PyCodeObject *co,
{
int posonly_conflicts = 0;
PyObject* posonly_names = PyList_New(0);
- if (posonly_names == NULL)
- goto fail;
for(int k=0; k < co->co_posonlyargcount; k++){
PyObject* posonly_name = PyTuple_GET_ITEM(co->co_localsplusnames, k);
```suggestion
if (posonly_names == NULL) {
goto fail;
}
```
positional_only_passed_as_keyword(PyThreadState *tstate, PyCodeObject *co,
{
int posonly_conflicts = 0;
PyObject* posonly_names = PyList_New(0);
+ if (posonly_names == NULL) {
+ goto fail;
+ }
for(int k=0; k < co->co_posonlyargcount; k++){
PyObject* posonly_name = PyTuple_GET_ITEM(co->co_localsplusnames, k);
|
codereview_new_cpp_data_9337 | corresponding Unix manual entries for more information on calls.");
#if defined(HAVE_SYS_XATTR_H) && defined(__linux__) && !defined(__FreeBSD_kernel__) && !defined(__GNU__)
# define USE_XATTRS
-# include <linux/limits.h>
#endif
#ifdef USE_XATTRS
is this specific direct linux kernel include required (it _should_ be harmless), i'd expect `sys/xattr.h` (below) or even `<limits.h>` to include this kernel header for us? (as seems to be the case on glibc and android bionic libc?)
corresponding Unix manual entries for more information on calls.");
#if defined(HAVE_SYS_XATTR_H) && defined(__linux__) && !defined(__FreeBSD_kernel__) && !defined(__GNU__)
# define USE_XATTRS
+# include <linux/limits.h> // Needed for XATTR_SIZE_MAX on musl libc.
#endif
#ifdef USE_XATTRS |
codereview_new_cpp_data_9338 | int_bit_count_impl(PyObject *self)
/*[clinic input]
int.as_integer_ratio
-Given an integer x, return the tuple (int(x), 1).
[clinic start generated code]*/
static PyObject *
int_as_integer_ratio_impl(PyObject *self)
-/*[clinic end generated code: output=e60803ae1cc8621a input=258f5b08307e7dcd]*/
{
PyObject *ratio_tuple;
PyObject *numerator = long_long(self);
Regarding the removal of the doc examples / doctest, I don't think there is anything wrong with it per say, but, IMO, I think it's best to probably just leave it untouched and to keep this PR focused on the issue at hand, which is clarifying that the ratios returned are in lowest terms.
int_bit_count_impl(PyObject *self)
/*[clinic input]
int.as_integer_ratio
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
[clinic start generated code]*/
static PyObject *
int_as_integer_ratio_impl(PyObject *self)
+/*[clinic end generated code: output=e60803ae1cc8621a input=384ff1766634bec2]*/
{
PyObject *ratio_tuple;
PyObject *numerator = long_long(self); |
codereview_new_cpp_data_9340 | add_features(PyObject *mod)
static void
pyexpat_capsule_destructor(PyObject *capsule)
{
- PyMem_Free(PyCapsule_GetPointer(capsule, PyExpat_CAPSULE_NAME));
}
If `PyCapsule_GetPointer` fails it will set an exception. We should check that, and `PyErr_WriteUnraisable` on error.
add_features(PyObject *mod)
static void
pyexpat_capsule_destructor(PyObject *capsule)
{
+ void *p = PyCapsule_GetPointer(capsule, PyExpat_CAPSULE_NAME);
+ if (p == NULL) {
+ PyErr_WriteUnraisable(capsule);
+ return;
+ }
+ PyMem_Free(p);
}
|
codereview_new_cpp_data_9343 | static PyObject *
unicodeiter_reduce(unicodeiterobject *it, PyObject *Py_UNUSED(ignored))
{
PyObject *iter = _PyEval_GetBuiltin(&_Py_ID(iter));
if (it->it_seq != NULL) {
return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index);
Any reason not to include the same comment here as in the other cases?
static PyObject *
unicodeiter_reduce(unicodeiterobject *it, PyObject *Py_UNUSED(ignored))
{
PyObject *iter = _PyEval_GetBuiltin(&_Py_ID(iter));
+
+ /* _PyEval_GetBuiltin can invoke arbitrary code.
+ * calls must be *before* access of `it` pointers,
+ * since C parameter eval order is undefined.
+ * see issue #101765 */
if (it->it_seq != NULL) {
return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index); |
codereview_new_cpp_data_9344 | bytearrayiter_reduce(bytesiterobject *it, PyObject *Py_UNUSED(ignored))
{
PyObject *iter = _PyEval_GetBuiltin(&_Py_ID(iter));
- /* _PyEval_GetBuiltin can invoke arbitrary code.
- * calls must be *before* access of `it` pointers,
- * since C parameter eval order is undefined.
* see issue #101765 */
if (it->it_seq != NULL) {
I think parameter eval order isn't the issue and shouldn't be mentioned. The problem is that the `_PyEval_GetBuiltin` call invalidates the previous `if` check.
For example, this code would also be buggy:
```
if (it->it_seq != NULL) {
PyObject *iter = _PyEval_GetBuiltin(&_Py_ID(iter));
return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index);
}
```
The parameter evaluation order makes things a bit more unpredictable, but the bug is that the `if` check is done too early, not that we're relying on evaluation order.
bytearrayiter_reduce(bytesiterobject *it, PyObject *Py_UNUSED(ignored))
{
PyObject *iter = _PyEval_GetBuiltin(&_Py_ID(iter));
+ /* _PyEval_GetBuiltin can invoke arbitrary code,
+ * call must be before access of iterator pointers.
* see issue #101765 */
if (it->it_seq != NULL) { |
codereview_new_cpp_data_9351 | basicblock_next_instr(basicblock *b)
static int
stack_effect(int opcode, int oparg, int jump)
{
- if (0 <= opcode && opcode < 256) {
if (_PyOpcode_Deopt[opcode] != opcode) {
// Specialized instructions are not supported.
return PY_INVALID_STACK_EFFECT;
We have these:
```
#define MAX_REAL_OPCODE 254
#define IS_WITHIN_OPCODE_RANGE(opcode) \
(((opcode) >= 0 && (opcode) <= MAX_REAL_OPCODE) || \
IS_PSEUDO_OPCODE(opcode))
```
basicblock_next_instr(basicblock *b)
static int
stack_effect(int opcode, int oparg, int jump)
{
+ if (0 <= opcode && opcode <= MAX_REAL_OPCODE) {
if (_PyOpcode_Deopt[opcode] != opcode) {
// Specialized instructions are not supported.
return PY_INVALID_STACK_EFFECT; |
codereview_new_cpp_data_9352 | basicblock_next_instr(basicblock *b)
static int
stack_effect(int opcode, int oparg, int jump)
{
- if (0 <= opcode && opcode < 256) {
if (_PyOpcode_Deopt[opcode] != opcode) {
// Specialized instructions are not supported.
return PY_INVALID_STACK_EFFECT;
Wouldn't it be correct to return the stack effect of `_PyOpcode_Deopt[opcode]`?
basicblock_next_instr(basicblock *b)
static int
stack_effect(int opcode, int oparg, int jump)
{
+ if (0 <= opcode && opcode <= MAX_REAL_OPCODE) {
if (_PyOpcode_Deopt[opcode] != opcode) {
// Specialized instructions are not supported.
return PY_INVALID_STACK_EFFECT; |
codereview_new_cpp_data_9353 | m_sinpi(double x)
return copysign(1.0, x)*r;
}
-/* Implementation of the real gamma function. Kept here to workaround
issues (see e.g. #70309) with quality of libm's tgamma/lgamma implementations
on various platforms (Windows, MacOS). In extensive but non-exhaustive
random tests, this function proved accurate to within <= 10 ulps across the
Grammar nitpick:
```suggestion
/* Implementation of the real gamma function. Kept here to work around
```
m_sinpi(double x)
return copysign(1.0, x)*r;
}
+/* Implementation of the real gamma function. Kept here to work around
issues (see e.g. #70309) with quality of libm's tgamma/lgamma implementations
on various platforms (Windows, MacOS). In extensive but non-exhaustive
random tests, this function proved accurate to within <= 10 ulps across the |
codereview_new_cpp_data_9355 | dummy_func(
STAT_INC(FOR_ITER, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
#endif /* ENABLE_SPECIALIZATION */
- /* before: [iter]; after: [iter, iter()] *or* [] (and jump an extra instr.) */
next = (*Py_TYPE(iter)->tp_iternext)(iter);
if (next == NULL) {
if (_PyErr_Occurred(tstate)) {
```suggestion
/* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) */
```
dummy_func(
STAT_INC(FOR_ITER, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
#endif /* ENABLE_SPECIALIZATION */
+ /* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) */
next = (*Py_TYPE(iter)->tp_iternext)(iter);
if (next == NULL) {
if (_PyErr_Occurred(tstate)) { |
codereview_new_cpp_data_9356 | dummy_func(
else {
/* `iterable` is not a generator. */
iter = PyObject_GetIter(iterable);
- Py_DECREF(iterable);
if (iter == NULL) {
goto error;
}
}
PREDICT(LOAD_CONST);
}
This seems unsafe -- when we jump to `error`, `iterable` is still on the stack, but the stack no longer owns it.
dummy_func(
else {
/* `iterable` is not a generator. */
iter = PyObject_GetIter(iterable);
if (iter == NULL) {
goto error;
}
+ Py_DECREF(iterable);
}
PREDICT(LOAD_CONST);
} |
codereview_new_cpp_data_9357 | dummy_func(
prev_exc = Py_NewRef(Py_None);
}
assert(PyExceptionInstance_Check(new_exc));
- Py_INCREF(new_exc);
- exc_info->exc_value = new_exc;
}
// error: LOAD_ATTR has irregular stack effect
Nit: This could be combined into
```suggestion
exc_info->exc_value = Py_NewRef(new_exc);
```
dummy_func(
prev_exc = Py_NewRef(Py_None);
}
assert(PyExceptionInstance_Check(new_exc));
+ exc_info->exc_value = Py_NewRef(new_exc);
}
// error: LOAD_ATTR has irregular stack effect |
codereview_new_cpp_data_9360 | os__isfile_impl(PyObject *module, PyObject *path)
OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
}
if (hfile != INVALID_HANDLE_VALUE) {
- GetFileInformationByHandleEx(hfile, FileBasicInfo, &info, sizeof(info));
- result = !(info.FileAttributes & FILE_ATTRIBUTE_DIRECTORY);
- if (result) {
- fileType = GetFileType(hfile);
- if (fileType != FILE_TYPE_DISK) {
- result = 0;
- }
}
if (close_file) {
CloseHandle(hfile);
Unlike `isdir()`, for `isfile()` you need to check whether `GetFileInformationByHandleEx()` succeeds. It's a filesystem file/directory if the call succeeds. For example, "\\\\.\\PIPE\\" is the root directory of the named-pipe filesystem. OTOH, "\\\\.\\C:" is a volume a device, which doesn't support filesystem information classes such as `FileBasicInfo`.
os__isfile_impl(PyObject *module, PyObject *path)
OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
}
if (hfile != INVALID_HANDLE_VALUE) {
+ if (GetFileInformationByHandleEx(hfile, FileBasicInfo, &info, sizeof(info))) {
+ result = !(info.FileAttributes & FILE_ATTRIBUTE_DIRECTORY);
+ } else {
+ result = 0;
}
if (close_file) {
CloseHandle(hfile); |
codereview_new_cpp_data_9365 | dummy_func(
exc = args[0];
/* fall through */
case 0:
- if (do_raise(tstate, exc, cause)) {
- ERROR_IF(true, exception_unwind);
- }
break;
default:
_PyErr_SetString(tstate, PyExc_SystemError,
Couldn't this whole block be be this?
```cc
ERROR_IF(do_raise(tstate, exc, cause), exception_unwind);
```
dummy_func(
exc = args[0];
/* fall through */
case 0:
+ ERROR_IF(do_raise(tstate, exc, cause), exception_unwind);
break;
default:
_PyErr_SetString(tstate, PyExc_SystemError, |
codereview_new_cpp_data_9366 | get_module_state(PyObject *mod)
}
static struct PyModuleDef itertoolsmodule;
static inline itertools_state *
find_state_by_type(PyTypeObject *tp)
{
```suggestion
static struct PyModuleDef itertoolsmodule;
```
nit
get_module_state(PyObject *mod)
}
static struct PyModuleDef itertoolsmodule;
+
static inline itertools_state *
find_state_by_type(PyTypeObject *tp)
{ |
codereview_new_cpp_data_9367 | dummy_func(
// Success!
assert(PyTuple_CheckExact(attrs));
}
- else if (_PyErr_Occurred(tstate)) {
- // Error!
- ERROR_IF(true, error);
- }
else {
// Failure!
attrs = Py_NewRef(Py_None);
}
The `ERROR_IF(true, error)` inside of a conditional feels sort of strange to me. What do you think of this?
```c
else {
ERROR_IF(_PyErr_Occurred(tstate), error);
attrs = Py_NewRef(Py_None);
}
```
dummy_func(
// Success!
assert(PyTuple_CheckExact(attrs));
}
else {
+ // Error!
+ ERROR_IF(_PyErr_Occurred(tstate), error);
// Failure!
attrs = Py_NewRef(Py_None);
} |
codereview_new_cpp_data_9370 | elementiter_next(ElementIterObject *it)
continue;
}
elementtreestate *st = ET_STATE_GLOBAL;
assert(Element_Check(st, extra->children[child_index]));
elem = (ElementObject *)Py_NewRef(extra->children[child_index]);
item->child_index++;
}
```suggestion
#ifndef NDEBUG
elementtreestate *st = ET_STATE_GLOBAL;
assert(Element_Check(st, extra->children[child_index]));
#endif
```
elementiter_next(ElementIterObject *it)
continue;
}
+#ifndef NDEBUG
elementtreestate *st = ET_STATE_GLOBAL;
assert(Element_Check(st, extra->children[child_index]));
+#endif
elem = (ElementObject *)Py_NewRef(extra->children[child_index]);
item->child_index++;
} |
codereview_new_cpp_data_9379 | dummy_func(
_Py_DECREF_SPECIALIZED(left, _PyUnicode_ExactDealloc);
_Py_DECREF_SPECIALIZED(right, _PyUnicode_ExactDealloc);
assert(res == 0 || res == 1);
- assert((oparg & 15) == 7 || (oparg & 15) == 8);
- jump = (res + 7) & oparg;
}
super(COMPARE_OP_STR_JUMP) = _COMPARE_OP_STR + _JUMP_IF;
```suggestion
assert((oparg & 0xF) == 7 || (oparg & 0xF) == 8);
```
dummy_func(
_Py_DECREF_SPECIALIZED(left, _PyUnicode_ExactDealloc);
_Py_DECREF_SPECIALIZED(right, _PyUnicode_ExactDealloc);
assert(res == 0 || res == 1);
+ assert((oparg & 0xf) == COMPARISON_NOT_EQUALS || (oparg & 0xf) == COMPARISON_EQUALS);
+ assert(COMPARISON_NOT_EQUALS + 1 == COMPARISON_EQUALS);
+ jump = (res + COMPARISON_NOT_EQUALS) & oparg;
}
super(COMPARE_OP_STR_JUMP) = _COMPARE_OP_STR + _JUMP_IF; |
codereview_new_cpp_data_9380 | static int compiler_addcompare(struct compiler *c, location loc,
default:
Py_UNREACHABLE();
}
- /* cmp goes in top bits of the oparg, low bits are for the mask. */
ADDOP_I(c, loc, COMPARE_OP, cmp << 4);
return SUCCESS;
}
```suggestion
/* cmp goes in top bits of the oparg, while the low bits are used by specialized
* versions of this opcode to store the comparison mask. */
```
static int compiler_addcompare(struct compiler *c, location loc,
default:
Py_UNREACHABLE();
}
+ /* cmp goes in top bits of the oparg, while the low bits are used by specialized
+ * versions of this opcode to store the comparison mask. */
ADDOP_I(c, loc, COMPARE_OP, cmp << 4);
return SUCCESS;
} |
codereview_new_cpp_data_9381 | static int compiler_addcompare(struct compiler *c, location loc,
default:
Py_UNREACHABLE();
}
- /* cmp goes in top bits of the oparg, while the low bits are used by specialized
* versions of this opcode to store the comparison mask. */
ADDOP_I(c, loc, COMPARE_OP, cmp << 4);
return SUCCESS;
```suggestion
/* cmp goes in top bits of the oparg, while the low bits are used by quickened
```
static int compiler_addcompare(struct compiler *c, location loc,
default:
Py_UNREACHABLE();
}
+ /* cmp goes in top bits of the oparg, while the low bits are used by quickened
* versions of this opcode to store the comparison mask. */
ADDOP_I(c, loc, COMPARE_OP, cmp << 4);
return SUCCESS; |
codereview_new_cpp_data_9382 | dummy_func(
ERROR_IF(list == NULL, error);
}
- // 'stuff' is a list object followed by (oparg - 1) unused values
- inst(LIST_EXTEND, (stuff[oparg], iterable -- stuff[oparg])) {
- PyObject *none_val = _PyList_Extend((PyListObject *)stuff[0], iterable);
if (none_val == NULL) {
if (_PyErr_ExceptionMatches(tstate, PyExc_TypeError) &&
(Py_TYPE(iterable)->tp_iter == NULL && !PySequence_Check(iterable)))
These two make me *really* happy. :)
dummy_func(
ERROR_IF(list == NULL, error);
}
+ inst(LIST_EXTEND, (list, unused[oparg-1], iterable -- list, unused[oparg-1])) {
+ PyObject *none_val = _PyList_Extend((PyListObject *)list, iterable);
if (none_val == NULL) {
if (_PyErr_ExceptionMatches(tstate, PyExc_TypeError) &&
(Py_TYPE(iterable)->tp_iter == NULL && !PySequence_Check(iterable))) |
codereview_new_cpp_data_9390 | dummy_func(
}
inst(CALL_INTRINSIC_1, (value -- res)) {
res = _PyIntrinsics_UnaryFunctions[oparg](tstate, value);
Py_DECREF(value);
ERROR_IF(res == NULL, error);
If the arg is corrupted here then it will just crash. Shall we have an assertion that oparg is in range?
dummy_func(
}
inst(CALL_INTRINSIC_1, (value -- res)) {
+ assert(oparg <= MAX_INTRINSIC_1);
res = _PyIntrinsics_UnaryFunctions[oparg](tstate, value);
Py_DECREF(value);
ERROR_IF(res == NULL, error); |
codereview_new_cpp_data_9391 | dummy_func(
DEOPT_IF(argcount < minargs, CALL);
DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL);
STAT_INC(CALL, hit);
- _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, argcount);
STACK_SHRINK(argcount);
for (int i = 0; i < argcount; i++) {
new_frame->localsplus[i] = stack_pointer[i];
```suggestion
_PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, code->co_argcount);
```
dummy_func(
DEOPT_IF(argcount < minargs, CALL);
DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL);
STAT_INC(CALL, hit);
+ _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, code->co_argcount);
STACK_SHRINK(argcount);
for (int i = 0; i < argcount; i++) {
new_frame->localsplus[i] = stack_pointer[i]; |
codereview_new_cpp_data_9395 | PyCode_GetFreevars(PyCodeObject *code)
return _PyCode_GetFreevars(code);
}
-int
-_PyCode_GetNumFrameSlots(PyCodeObject *code)
-{
- /* This function needs to remain in sync with the calculation of
- * co_framesize in Tools/build/deepfreeze.py */
- assert(code->co_framesize >= FRAME_SPECIALS_SIZE);
- return code->co_framesize - FRAME_SPECIALS_SIZE;
-}
-
static void
deopt_code(_Py_CODEUNIT *instructions, Py_ssize_t len)
{
This should probably be a static inline function.
PyCode_GetFreevars(PyCodeObject *code)
return _PyCode_GetFreevars(code);
}
static void
deopt_code(_Py_CODEUNIT *instructions, Py_ssize_t len)
{ |
codereview_new_cpp_data_9397 | symtable_extend_namedexpr_scope(struct symtable *st, expr_ty e)
if (ste->ste_comprehension) {
long target_in_scope = _PyST_GetSymbol(ste, target_name);
if ((target_in_scope & DEF_COMP_ITER) &&
- (target_in_scope & (DEF_LOCAL | DEF_GLOBAL))) {
PyErr_Format(PyExc_SyntaxError, NAMED_EXPR_COMP_CONFLICT, target_name);
PyErr_RangedSyntaxLocationObject(st->st_filename,
e->lineno,
Hmm, should this also check `DEF_NONLOCAL`?
Actually, looking closer, I think `DEF_GLOBAL` might be impossible, because we already check for it when setting `DEF_COMP_ITER`. Or at least, tests pass without checking `DEF_GLOBAL` here.
symtable_extend_namedexpr_scope(struct symtable *st, expr_ty e)
if (ste->ste_comprehension) {
long target_in_scope = _PyST_GetSymbol(ste, target_name);
if ((target_in_scope & DEF_COMP_ITER) &&
+ (target_in_scope & DEF_LOCAL)) {
PyErr_Format(PyExc_SyntaxError, NAMED_EXPR_COMP_CONFLICT, target_name);
PyErr_RangedSyntaxLocationObject(st->st_filename,
e->lineno, |
codereview_new_cpp_data_9398 | long_long_meth(PyObject *self, PyObject *Py_UNUSED(ignored))
/*[clinic input]
int.is_integer
-Returns True.
[clinic start generated code]*/
static PyObject *
int_is_integer_impl(PyObject *self)
-/*[clinic end generated code: output=90f8e794ce5430ef input=5987f0abb5d0e177]*/
{
Py_RETURN_TRUE;
}
```suggestion
Returns True.
Exists for duck type compatibility with `float.is_integer`.
```
I think this should also be in `__doc__`.
long_long_meth(PyObject *self, PyObject *Py_UNUSED(ignored))
/*[clinic input]
int.is_integer
+Returns True. Exists for duck type compatibility with float.is_integer.
[clinic start generated code]*/
static PyObject *
int_is_integer_impl(PyObject *self)
+/*[clinic end generated code: output=90f8e794ce5430ef input=7e41c4d4416e05f2]*/
{
Py_RETURN_TRUE;
} |
codereview_new_cpp_data_9404 | dummy_func(
assert(self_cls->tp_flags & Py_TPFLAGS_MANAGED_DICT);
PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(self);
DEOPT_IF(_PyDictOrValues_IsValues(dorv), LOAD_ATTR);
- PyDictKeysObject *keys = ((PyHeapTypeObject *)self_cls)->ht_cached_keys;
- DEOPT_IF(keys->dk_version != read_u32(cache->keys_version), LOAD_ATTR);
STAT_INC(LOAD_ATTR, hit);
PyObject *res = read_obj(cache->descr);
assert(res != NULL);
This is the wrong keys. You need to check the object's dictionary's keys.
dummy_func(
assert(self_cls->tp_flags & Py_TPFLAGS_MANAGED_DICT);
PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(self);
DEOPT_IF(_PyDictOrValues_IsValues(dorv), LOAD_ATTR);
+ PyObject *dict = _PyDictOrValues_GetDict(dorv);
+ PyDictKeysObject *keys = (dict == NULL) ? NULL : ((PyDictObject *)dict)->ma_keys;
+ // Note: cache->keys_version can be 0 when dict is NULL.
+ DEOPT_IF(keys != NULL && keys->dk_version != read_u32(cache->keys_version), LOAD_ATTR);
STAT_INC(LOAD_ATTR, hit);
PyObject *res = read_obj(cache->descr);
assert(res != NULL); |
codereview_new_cpp_data_9420 | void addDoubleTilingPadExpertPassPipeline(OpPassManager &passManager,
{
LinalgSingleTilingExpertPassOptions options;
options.vectorize = true;
- options.enableVectorMasking = true;
options.vectorizePadding = true;
nestedModulePM.addNestedPass<func::FuncOp>(
createLinalgSingleTilingExpertPass(options));
THis is set to true unconditionally?
void addDoubleTilingPadExpertPassPipeline(OpPassManager &passManager,
{
LinalgSingleTilingExpertPassOptions options;
options.vectorize = true;
+ options.enableVectorMasking = enableVectorMasking;
options.vectorizePadding = true;
nestedModulePM.addNestedPass<func::FuncOp>(
createLinalgSingleTilingExpertPass(options)); |
codereview_new_cpp_data_9421 | DiagnosedSilenceableFailure transform_dialect::ApplyPatternsOp::applyToOne(
// upstream moveLoopInvariantCode if necessary.
funcOp->walk(
[](LoopLikeOpInterface loopLike) { moveLoopInvariantCode(loopLike); });
- // For now, put single loop promotion as part of licm.
- // TODO: This is almost certainly mising rewriter/listener and needs to be
- // plumbed through upstream `promoteIfSingleIteration`.
funcOp->walk([](Operation *op) {
(void)llvm::TypeSwitch<Operation *, LogicalResult>(op)
.Case<AffineForOp, scf::ForOp>(
Both affine and scf versions of this code `splice` the body of the loop into the parent region, and do some RAUW calls. From the tracking point of view, operation mapping should be fine, and we are not using value mapping yet.
DiagnosedSilenceableFailure transform_dialect::ApplyPatternsOp::applyToOne(
// upstream moveLoopInvariantCode if necessary.
funcOp->walk(
[](LoopLikeOpInterface loopLike) { moveLoopInvariantCode(loopLike); });
+ // For now, put single loop promotion as part of licm. Underlying
+ // implementations perform splice operations which shouldn't need tracking.
+ // TODO: confirm / revisit this assumption and plumb a rewriter through
+ // upstream moveLoopInvariantCode if necessary.
funcOp->walk([](Operation *op) {
(void)llvm::TypeSwitch<Operation *, LogicalResult>(op)
.Case<AffineForOp, scf::ForOp>( |
codereview_new_cpp_data_9422 | void LLVMGPULowerExecutableTargetPass::runOnOperation() {
case IREE::Codegen::DispatchLoweringPassPipeline::LLVMGPUWarpReduction:
addGPUWarpReductionPassPipeline(executableLoweringPipeline);
break;
- case IREE::Codegen::DispatchLoweringPassPipeline::LLVMGPUDataTiling:
addGPUDataTilingPasses(executableLoweringPipeline);
break;
// Transform-dialect pipelines.
please fix the naming this is independent of data tiling.
void LLVMGPULowerExecutableTargetPass::runOnOperation() {
case IREE::Codegen::DispatchLoweringPassPipeline::LLVMGPUWarpReduction:
addGPUWarpReductionPassPipeline(executableLoweringPipeline);
break;
+ case IREE::Codegen::DispatchLoweringPassPipeline::LLVMGPUPackUnPack:
addGPUDataTilingPasses(executableLoweringPipeline);
break;
// Transform-dialect pipelines. |
codereview_new_cpp_data_9423 | void addGPUTransposePassPipeline(OpPassManager &pm) {
addBufferizePasses(nestedModulePM);
// distribute foreach threads
- addBufferizePasses(nestedModulePM);
nestedModulePM.addNestedPass<func::FuncOp>(createLLVMGPUDistribute());
nestedModulePM.addNestedPass<func::FuncOp>(createMemrefCopyToLinalgPass());
Added by mistake?
void addGPUTransposePassPipeline(OpPassManager &pm) {
addBufferizePasses(nestedModulePM);
// distribute foreach threads
nestedModulePM.addNestedPass<func::FuncOp>(createLLVMGPUDistribute());
nestedModulePM.addNestedPass<func::FuncOp>(createMemrefCopyToLinalgPass()); |
codereview_new_cpp_data_9424 | struct ScatterInt64Indices : public OpRewritePattern<mhlo::ScatterOp> {
return rewriter.notifyMatchFailure(op, "cannot validate legal size");
uint64_t maxSize = std::numeric_limits<int32_t>::max();
- if (indicesETy.getIntOrFloatBitWidth() > 32)
- for (int i = 0, s = indicesTy.getRank(); i < s; ++i)
- if (indicesTy.getDimSize(i) > maxSize)
return rewriter.notifyMatchFailure(op, "index may exceed i32 max");
indices = rewriter.create<mhlo::ConvertOp>(
op.getLoc(), indicesTy.clone(rewriter.getI32Type()), indices);
IREE style nit: wrap with braces on if/for simple single-statement bodies.
struct ScatterInt64Indices : public OpRewritePattern<mhlo::ScatterOp> {
return rewriter.notifyMatchFailure(op, "cannot validate legal size");
uint64_t maxSize = std::numeric_limits<int32_t>::max();
+ if (indicesETy.getIntOrFloatBitWidth() > 32) {
+ for (int i = 0, s = indicesTy.getRank(); i < s; ++i) {
+ if (indicesTy.getDimSize(i) > maxSize) {
return rewriter.notifyMatchFailure(op, "index may exceed i32 max");
+ }}}
indices = rewriter.create<mhlo::ConvertOp>(
op.getLoc(), indicesTy.clone(rewriter.getI32Type()), indices); |
codereview_new_cpp_data_9425 | static LogicalResult setMaliMatmulConfig(linalg::LinalgOp op,
const int subgroupSize = limits.getSubgroupSize();
const std::array<int64_t, 2> workgroupXY = {subgroupSize / 2, 2};
std::array<int64_t, 3> threadMNK;
- Type elementType = op.getDpsInputOperand(0)
- ->get()
- .getType()
- .cast<ShapedType>()
- .getElementType();
if (elementType.getIntOrFloatBitWidth() == 16) {
threadMNK = {2, 8, 8};
} else if (elementType.isInteger(8)) {
- threadMNK = {2, 4, 4};
} else {
threadMNK = {6, 4, 4};
}
Nit: can we use two local variables for this long chain of method calls? The formatting here is not very nice. :)
static LogicalResult setMaliMatmulConfig(linalg::LinalgOp op,
const int subgroupSize = limits.getSubgroupSize();
const std::array<int64_t, 2> workgroupXY = {subgroupSize / 2, 2};
std::array<int64_t, 3> threadMNK;
+ Type inputType = op.getDpsInputOperand(0)->get().getType();
+ Type elementType = inputType.cast<ShapedType>().getElementType();
if (elementType.getIntOrFloatBitWidth() == 16) {
threadMNK = {2, 8, 8};
} else if (elementType.isInteger(8)) {
+ threadMNK = {4, 4, 4};
} else {
threadMNK = {6, 4, 4};
} |
codereview_new_cpp_data_9428 | FailureOr<TileAndFuseResult> tileAndFuseDispatchUsingSCFForOp(
return rewriter.notifyMatchFailure(sliceOp,
"fusion along slice op failed");
}
- Operation *tiledProducer = tiledProducerVal->getDefiningOp();
- if (!llvm::dyn_cast_or_null<TilingInterface>(tiledProducer)) {
return rewriter.notifyMatchFailure(
- tiledProducer,
"expected tiled implementation to implement TilingInterface as well");
}
if (tiledProducer->getNumResults() != fusableProducer->getNumResults()) {
I suspect this is the bug - passing tiledProducer (which is nullptr) to notifyMatchFailure
FailureOr<TileAndFuseResult> tileAndFuseDispatchUsingSCFForOp(
return rewriter.notifyMatchFailure(sliceOp,
"fusion along slice op failed");
}
+ auto tiledProducer = tiledProducerVal->getDefiningOp<TilingInterface>();
+ if (!tiledProducer) {
return rewriter.notifyMatchFailure(
+ tiledProducerVal->getDefiningOp(),
"expected tiled implementation to implement TilingInterface as well");
}
if (tiledProducer->getNumResults() != fusableProducer->getNumResults()) { |
codereview_new_cpp_data_9429 | struct LinalgExtOpInterface
}
};
template <typename OpTy>
static FailureOr<std::pair<Value, Value>> getSourceAndDestFromPackUnPackOp(
RewriterBase &rewriter, OpTy op, const BufferizationOptions &options) {
Value source;
auto maybeBuffer = getBuffer(rewriter, op.getSource(), options);
if (failed(maybeBuffer)) return failure();
COuld we add some comments here?
struct LinalgExtOpInterface
}
};
+/// Returns the buffers of the source and destination for pack and unpack ops.
+/// Returns a failure if the buffers can not be found.
template <typename OpTy>
static FailureOr<std::pair<Value, Value>> getSourceAndDestFromPackUnPackOp(
RewriterBase &rewriter, OpTy op, const BufferizationOptions &options) {
+ static_assert(llvm::is_one_of<OpTy, tensor::PackOp, tensor::UnPackOp>::value);
Value source;
auto maybeBuffer = getBuffer(rewriter, op.getSource(), options);
if (failed(maybeBuffer)) return failure(); |
codereview_new_cpp_data_9430 | transform_ext::StructuredOpMatcher::StructuredOpMatcher(
StructuredOpMatcher &A, StructuredOpMatcher &B) {
predicates.push_back([&A, &B](linalg::LinalgOp linalgOp) -> bool {
- LLVM_DEBUG(DBGS() << "start recursive match {\n");
{
auto debugRAII = llvm::make_scope_exit(
[] { LLVM_DEBUG(DBGS() << "} end recursive match"); });
if (A.match(linalgOp))
return true;
}
- if (B.match(linalgOp))
- return true;
return false;
});
recordNestedMatcher(A);
Nit: we also want the start/end debug output around this one.
transform_ext::StructuredOpMatcher::StructuredOpMatcher(
StructuredOpMatcher &A, StructuredOpMatcher &B) {
predicates.push_back([&A, &B](linalg::LinalgOp linalgOp) -> bool {
+ LLVM_DEBUG(DBGS() << "start recursive lhs OR match {\n");
{
auto debugRAII = llvm::make_scope_exit(
[] { LLVM_DEBUG(DBGS() << "} end recursive match"); });
if (A.match(linalgOp))
return true;
}
+ LLVM_DEBUG(DBGS() << "start recursive rhs OR match {\n");
+ {
+ auto debugRAII = llvm::make_scope_exit(
+ [] { LLVM_DEBUG(DBGS() << "} end recursive match"); });
+ if (B.match(linalgOp))
+ return true;
+ }
return false;
});
recordNestedMatcher(A); |
codereview_new_cpp_data_9431 | transform_ext::StructuredOpMatcher::StructuredOpMatcher(
StructuredOpMatcher &A, StructuredOpMatcher &B) {
predicates.push_back([&A, &B](linalg::LinalgOp linalgOp) -> bool {
- LLVM_DEBUG(DBGS() << "start recursive match {\n");
{
auto debugRAII = llvm::make_scope_exit(
[] { LLVM_DEBUG(DBGS() << "} end recursive match"); });
if (A.match(linalgOp))
return true;
}
- if (B.match(linalgOp))
- return true;
return false;
});
recordNestedMatcher(A);
Nit: let's mention in the output we are doing an "or".
transform_ext::StructuredOpMatcher::StructuredOpMatcher(
StructuredOpMatcher &A, StructuredOpMatcher &B) {
predicates.push_back([&A, &B](linalg::LinalgOp linalgOp) -> bool {
+ LLVM_DEBUG(DBGS() << "start recursive lhs OR match {\n");
{
auto debugRAII = llvm::make_scope_exit(
[] { LLVM_DEBUG(DBGS() << "} end recursive match"); });
if (A.match(linalgOp))
return true;
}
+ LLVM_DEBUG(DBGS() << "start recursive rhs OR match {\n");
+ {
+ auto debugRAII = llvm::make_scope_exit(
+ [] { LLVM_DEBUG(DBGS() << "} end recursive match"); });
+ if (B.match(linalgOp))
+ return true;
+ }
return false;
});
recordNestedMatcher(A); |
codereview_new_cpp_data_9432 | static FailureOr<Operation *> getRootOp(Operation *op) {
funcOp = op->getParentOfType<func::FuncOp>();
}
Operation *rootOp = nullptr;
mlir::iree_compiler::IREE::Codegen::LoweringConfigAttr rootLoweringConfig;
auto result = funcOp.walk([&](Operation *op) -> WalkResult {
maybe we should check if funcOp is nullptr or not. An assertion is fine to me.
static FailureOr<Operation *> getRootOp(Operation *op) {
funcOp = op->getParentOfType<func::FuncOp>();
}
+ assert(funcOp && "Missing funcOp");
+
Operation *rootOp = nullptr;
mlir::iree_compiler::IREE::Codegen::LoweringConfigAttr rootLoweringConfig;
auto result = funcOp.walk([&](Operation *op) -> WalkResult { |
codereview_new_cpp_data_9433 | struct ConvertMHLOToLinalgOnTensorsPass
context);
patterns.insert<GenericTypeConvert>(
ml_program::GlobalStoreOp::getOperationName(), *typeConverter, context);
- // needed to convert mhlo::ReplicaIDOp
patterns.insert<GenericTypeConvert>(
tensor::FromElementsOp::getOperationName(), *typeConverter, context);
patterns.insert<GenericTypeConvert>(
style nit: https://google.github.io/styleguide/cppguide.html#Punctuation,_Spelling_and_Grammar
struct ConvertMHLOToLinalgOnTensorsPass
context);
patterns.insert<GenericTypeConvert>(
ml_program::GlobalStoreOp::getOperationName(), *typeConverter, context);
+ // This is needed when converting mhlo::ReplicaIDOp.
patterns.insert<GenericTypeConvert>(
tensor::FromElementsOp::getOperationName(), *typeConverter, context);
patterns.insert<GenericTypeConvert>( |
codereview_new_cpp_data_9434 | static iree_status_t iree_hal_cuda_device_create_channel(
// We could multiplex channels but it'd be better to surface that to the
// compiler so that it can emit the right rank math.
int requested_count = iree_math_count_ones_u64(queue_affinity);
- // FIXME: queue affinity is not set yet correctly, so we have all bits set.
if (requested_count != 64 && requested_count != 1) {
return iree_make_status(IREE_STATUS_INVALID_ARGUMENT,
"exactly one participant is allowed in a "
this isn't going to work when we have multiple queues in CUDA - if you need this to unblock yourself then please file an issue and tag it here with `// TODO(#issue): properly assign affinity in the compiler.`
static iree_status_t iree_hal_cuda_device_create_channel(
// We could multiplex channels but it'd be better to surface that to the
// compiler so that it can emit the right rank math.
int requested_count = iree_math_count_ones_u64(queue_affinity);
+ // TODO(#12206): properly assign affinity in the compiler.
if (requested_count != 64 && requested_count != 1) {
return iree_make_status(IREE_STATUS_INVALID_ARGUMENT,
"exactly one participant is allowed in a " |
codereview_new_cpp_data_9435 | static void buildStagedReductionStrategyThreadLevel(
Value root = blockCombinerOpH;
SmallVector<Value> opsToFuse = {gridFillH};
// If we have a unit dim after the reduction that doesn't broadcast fuse it
- // wuth the reduction.
if (strategy.captures.maybeTrailingRank ==
strategy.captures.reductionRank - 1) {
root = maybeTiledTrailingH;
I am not 100% clear what this condition does, can you elaborate a bit?
Seems quite brittle to me.
static void buildStagedReductionStrategyThreadLevel(
Value root = blockCombinerOpH;
SmallVector<Value> opsToFuse = {gridFillH};
// If we have a unit dim after the reduction that doesn't broadcast fuse it
+ // with the reduction.
if (strategy.captures.maybeTrailingRank ==
strategy.captures.reductionRank - 1) {
root = maybeTiledTrailingH; |
codereview_new_cpp_data_9436 | static FailureOr<IREE::Codegen::LoweringConfigAttr> collectComputeOps(
// discover such computation ops so that we can tile and fuse both regions.
SmallVector<scf::IfOp, 1> ifOps;
funcOp.walk<WalkOrder::PreOrder>([&ifOps](Operation *op) -> WalkResult {
- if (isa<linalg::LinalgOp, TilingInterface>(op)) {
// Exclude scf.if in linalg op
return WalkResult::skip();
} else if (auto ifOp = dyn_cast<scf::IfOp>(op)) {
Could you just check in the walk if the parent is a `LinalgOp` ?
static FailureOr<IREE::Codegen::LoweringConfigAttr> collectComputeOps(
// discover such computation ops so that we can tile and fuse both regions.
SmallVector<scf::IfOp, 1> ifOps;
funcOp.walk<WalkOrder::PreOrder>([&ifOps](Operation *op) -> WalkResult {
+ if (isa<linalg::LinalgOp>(op)) {
// Exclude scf.if in linalg op
return WalkResult::skip();
} else if (auto ifOp = dyn_cast<scf::IfOp>(op)) { |
codereview_new_cpp_data_9437 | static void addTileAndDistributePasses(
pm.addPass(createTileAndDistributeToWorkgroupsPass());
auto &nestedModulePM = pm.nest<ModuleOp>();
nestedModulePM.addNestedPass<func::FuncOp>(
- IREE::LinalgExt::createDecomposeAttentionPass());
nestedModulePM.addNestedPass<func::FuncOp>(
IREE::LinalgExt::createDecomposeSoftmaxPass());
if (clEnablePadConsumerFusion && useFuseTensorPadWithConsumerPass) {
Nice! I have a suggestion here that is not necessary for this PR.
It seems like we can combine the "DecomposeAttentionPass" and "DecomposeSoftmaxPass" into a single pass with a new interface. A simple interface that just adds a single method, i.e. "decomposeOperation" that decomposes the operation into the required sequence. Then the pass is just walking over the IR and decomposing the ops as needed?
WDYT?
Also FYI @ftynse (since Alex seems to be interested)
static void addTileAndDistributePasses(
pm.addPass(createTileAndDistributeToWorkgroupsPass());
auto &nestedModulePM = pm.nest<ModuleOp>();
nestedModulePM.addNestedPass<func::FuncOp>(
+ IREE::LinalgExt::createTileAndDecomposeAttentionPass());
nestedModulePM.addNestedPass<func::FuncOp>(
IREE::LinalgExt::createDecomposeSoftmaxPass());
if (clEnablePadConsumerFusion && useFuseTensorPadWithConsumerPass) { |
codereview_new_cpp_data_9438 | LogicalResult WinogradOutputTransformOp::reifyResultShapes(
LogicalResult SoftmaxOp::verify() {
Operation *op = getOperation();
- if (getNumInputs() != 1) {
- return op->emitOpError("expected one input operand");
- }
- if (getNumOutputs() != 1) {
- return op->emitOpError("expected one output operand");
- }
auto inputType = input().getType().cast<ShapedType>();
auto outputType = output().getType().cast<ShapedType>();
- if (outputType.getElementType() != inputType.getElementType()) {
- return op->emitOpError(
- "expected input/output element types to be identical");
- }
- SmallVector<int64_t> inputShape(inputType.getShape());
ArrayRef<int64_t> outputShape = outputType.getShape();
if (!areShapesCompatible(inputShape, outputShape)) {
return op->emitOpError("incompatible output shape");
nit: I believe all those can be enforced with tablegen declaration?
LogicalResult WinogradOutputTransformOp::reifyResultShapes(
LogicalResult SoftmaxOp::verify() {
Operation *op = getOperation();
auto inputType = input().getType().cast<ShapedType>();
auto outputType = output().getType().cast<ShapedType>();
+ ArrayRef<int64_t> inputShape = inputType.getShape();
ArrayRef<int64_t> outputShape = outputType.getShape();
if (!areShapesCompatible(inputShape, outputShape)) {
return op->emitOpError("incompatible output shape"); |
codereview_new_cpp_data_9439 | struct LinalgStrategyDecomposePass
if (!anchorFuncName.empty() && funcOp.getName() != anchorFuncName)
return;
RewritePatternSet decompositionPattern(funcOp.getContext());
decompositionPattern.add<
DownscaleSizeOneWindowed2DConvolution<linalg::Conv2DNhwcHwcfOp,
linalg::Conv1DNwcWcfOp>,
Just use
```
linalg::populateDecomposeConvolutionPatterns(decompositionPattern);
```
This way you don't have to keep the files in sync, you just include the patterns as they are added.
struct LinalgStrategyDecomposePass
if (!anchorFuncName.empty() && funcOp.getName() != anchorFuncName)
return;
RewritePatternSet decompositionPattern(funcOp.getContext());
+ // TODO(muralivi): Use
+ // linalg::populateDecomposeConvolutionPatterns(decompositionPattern).
decompositionPattern.add<
DownscaleSizeOneWindowed2DConvolution<linalg::Conv2DNhwcHwcfOp,
linalg::Conv1DNwcWcfOp>, |
codereview_new_cpp_data_9441 | std::pair<Value, Value> mlir::iree_compiler::cpu::buildCommonTrailingStrategy(
static FailureOr<ReductionConfig> applyKnownGoodReductionConfigurations(
const transform_ext::MatchedReductionCaptures &captures,
const CPUModel &cpuModel) {
int64_t reductionSize = captures.reductionOpSizes.back();
if (cpuModel.model == CPUModel::kXeonGold6154) {
if (captures.reductionOutputElementalTypeBitWidth == 32) {
- if (reductionSize) return ReductionConfig{32};
}
}
- return failure();
}
static ReductionConfig getReductionConfig(
Nit: this config looks too much like magic numbers. Can we at least have `/*foo=*/` comments in the constructor?
std::pair<Value, Value> mlir::iree_compiler::cpu::buildCommonTrailingStrategy(
static FailureOr<ReductionConfig> applyKnownGoodReductionConfigurations(
const transform_ext::MatchedReductionCaptures &captures,
const CPUModel &cpuModel) {
+ std::optional<int64_t> vectorSize;
int64_t reductionSize = captures.reductionOpSizes.back();
if (cpuModel.model == CPUModel::kXeonGold6154) {
if (captures.reductionOutputElementalTypeBitWidth == 32) {
+ if (reductionSize == 32) vectorSize = 32;
}
}
+ if (!vectorSize.has_value()) return failure();
+ return ReductionConfig{vectorSize.value()};
}
static ReductionConfig getReductionConfig( |
codereview_new_cpp_data_9449 | static SmallVector<int64_t> getPackOpResultTypeShape(
return resultShape;
}
-// Converts OpFoldResults to int64_t shape entries, unconditionally mapping all
-// Value's to kDynamic, even if they are arith.constant values.
-static SmallVector<int64_t>
-asShapeWithAnyValueAsDynamic(ArrayRef<OpFoldResult> ofrs) {
- SmallVector<int64_t> result;
- for (auto o : ofrs) {
- // Have to do this first, as getConstantIntValue special-cases constants.
- if (o.dyn_cast<Value>())
- result.push_back(ShapedType::kDynamic);
- else
- result.push_back(getConstantIntValue(o).value_or(ShapedType::kDynamic));
- }
- return result;
-}
-
SmallVector<OpFoldResult> PackOp::getResultShape(
OpBuilder &builder, Location loc, ArrayRef<OpFoldResult> sourceDims,
ArrayRef<OpFoldResult> innerTileSizes, ArrayRef<int64_t> innerDimsPos,
Might be worth moving this into https://github.com/iree-org/iree/blob/main/llvm-external-projects/iree-dialects/include/iree-dialects/Dialect/LinalgExt/Utils/Utils.h or in MLIR file (`StaticValueUtils.h`)
static SmallVector<int64_t> getPackOpResultTypeShape(
return resultShape;
}
SmallVector<OpFoldResult> PackOp::getResultShape(
OpBuilder &builder, Location loc, ArrayRef<OpFoldResult> sourceDims,
ArrayRef<OpFoldResult> innerTileSizes, ArrayRef<int64_t> innerDimsPos, |
codereview_new_cpp_data_9452 | class FPToUIOpConversion : public OpConversionPattern<arith::FPToUIOp> {
ConversionPatternRewriter &rewriter) const override {
auto srcType = srcOp.getIn().getType();
auto dstType = srcOp.getResult().getType();
- dstType.dump();
auto resultType = getTypeConverter()->convertType(dstType);
if (srcType.isF32()) {
if (dstType.isSignlessInteger(32) || dstType.isUnsignedInteger(32)) {
Remove debug code before merge please
class FPToUIOpConversion : public OpConversionPattern<arith::FPToUIOp> {
ConversionPatternRewriter &rewriter) const override {
auto srcType = srcOp.getIn().getType();
auto dstType = srcOp.getResult().getType();
auto resultType = getTypeConverter()->convertType(dstType);
if (srcType.isF32()) {
if (dstType.isSignlessInteger(32) || dstType.isUnsignedInteger(32)) { |
codereview_new_cpp_data_9453 | class FPToUIOpConversion : public OpConversionPattern<arith::FPToUIOp> {
ConversionPatternRewriter &rewriter) const override {
auto srcType = srcOp.getIn().getType();
auto dstType = srcOp.getResult().getType();
- dstType.dump();
auto resultType = getTypeConverter()->convertType(dstType);
if (srcType.isF32()) {
if (dstType.isSignlessInteger(32) || dstType.isUnsignedInteger(32)) {
This time with a suggested change
```suggestion
auto dstType = srcOp.getResult().getType();
auto resultType = getTypeConverter()->convertType(dstType);
```
class FPToUIOpConversion : public OpConversionPattern<arith::FPToUIOp> {
ConversionPatternRewriter &rewriter) const override {
auto srcType = srcOp.getIn().getType();
auto dstType = srcOp.getResult().getType();
auto resultType = getTypeConverter()->convertType(dstType);
if (srcType.isF32()) {
if (dstType.isSignlessInteger(32) || dstType.isUnsignedInteger(32)) { |
codereview_new_cpp_data_9454 | static void createTransformRegion(func::FuncOp entryPoint,
MLIRContext *ctx = entryPoint.getContext();
Location loc = entryPoint.getLoc();
OpBuilder b(ctx);
- auto mod = entryPoint->getParentOfType<ModuleOp>();
- b.setInsertionPointAfter(mod);
auto topLevelTransformModule = b.create<ModuleOp>(loc);
Region &topLevelTransformRegion = topLevelTransformModule.getBodyRegion();
b.setInsertionPointToStart(&topLevelTransformRegion.front());
This is inserting the transform dialect strategy within parent scope. So there is an implicit assumption that the pass itself is run on the parent scope. We can either pass the module in here explicitly to signify that the module is the translation unit, or we can insert it within the module, and discard it at the end of code-generation?
static void createTransformRegion(func::FuncOp entryPoint,
MLIRContext *ctx = entryPoint.getContext();
Location loc = entryPoint.getLoc();
OpBuilder b(ctx);
+ b.setInsertionPointAfter(entryPoint);
auto topLevelTransformModule = b.create<ModuleOp>(loc);
Region &topLevelTransformRegion = topLevelTransformModule.getBodyRegion();
b.setInsertionPointToStart(&topLevelTransformRegion.front()); |
codereview_new_cpp_data_9455 | class SPIRVTileAndPromotePass final
void runOnOperation() override;
private:
- /// Prmotes C matrix to shared memory when necessary and returns success if no
/// error happens.
LogicalResult doPromoteCMatrix(func::FuncOp funcOp) const;
```suggestion
/// Promotes C matrix to shared memory when necessary and returns success if no
```
class SPIRVTileAndPromotePass final
void runOnOperation() override;
private:
+ /// Promotes C matrix to shared memory when necessary and returns success if no
/// error happens.
LogicalResult doPromoteCMatrix(func::FuncOp funcOp) const;
|
codereview_new_cpp_data_9456 | void buildGlobalOptimizationPassPipeline(
/// uses case.
static void buildOptionalPreprocessingPassPipeline(OpPassManager &passManager) {
FunctionLikeNest(passManager)
- .addPredicatedPass(clEnableConvToImg2Col,
- IREE::Flow::createConvertConv2DToImg2ColPass)
.addPredicatedPass(clEnableConvToWinograd,
IREE::LinalgExt::createConvertConv2DToWinogradPass)
.addPredicatedPass(
!clMmt4dTargetOptions.empty(),
[]() {
Do you want to do this before conversion to im2col?
void buildGlobalOptimizationPassPipeline(
/// uses case.
static void buildOptionalPreprocessingPassPipeline(OpPassManager &passManager) {
FunctionLikeNest(passManager)
.addPredicatedPass(clEnableConvToWinograd,
IREE::LinalgExt::createConvertConv2DToWinogradPass)
+ .addPredicatedPass(clEnableConvToImg2Col,
+ IREE::Flow::createConvertConv2DToImg2ColPass)
.addPredicatedPass(
!clMmt4dTargetOptions.empty(),
[]() { |
codereview_new_cpp_data_9457 | class WGSLReplacePushConstantsPass
// We could store into a tensor<Nxi32>, but vec4s are better supported, so
// we'll use tensor<Nxvector<4xi32>> instead.
uint64_t numberOfVec4s = maxConstantIndex / 4 + 1;
// hal.interface.binding.subspan ->
llvm has `llvm::divideCeil` for it. So if it's a perfect multiple of 4, we can avoid the `+1` there.
class WGSLReplacePushConstantsPass
// We could store into a tensor<Nxi32>, but vec4s are better supported, so
// we'll use tensor<Nxvector<4xi32>> instead.
+ // Compute how many vec4s to use, i.e.
+ // max index 0 -> 1 vec4
+ // max index 3 -> 1 vec4
+ // max index 4 -> 2 vec4s
uint64_t numberOfVec4s = maxConstantIndex / 4 + 1;
// hal.interface.binding.subspan -> |
codereview_new_cpp_data_9458 | linalg::LinalgLoopDistributionOptions getIREELinalgLoopDistributionOptions(
SmallVector<linalg::ProcInfo, 3> procInfo(numParallelDims);
Value splitDim;
for (size_t dim = 0; dim < numParallelDims; ++dim) {
- if (numParallelDims > 3 && dim >= 2) {
if (!splitDim) {
splitDim =
buildHALWorkgroupInfoOp<IREE::HAL::InterfaceWorkgroupIDOp>(
Could we use `kNumMaxParallelDims` instead of hard coding it?
linalg::LinalgLoopDistributionOptions getIREELinalgLoopDistributionOptions(
SmallVector<linalg::ProcInfo, 3> procInfo(numParallelDims);
Value splitDim;
for (size_t dim = 0; dim < numParallelDims; ++dim) {
+ if (numParallelDims > kNumMaxParallelDims &&
+ dim >= kNumMaxParallelDims - 1) {
if (!splitDim) {
splitDim =
buildHALWorkgroupInfoOp<IREE::HAL::InterfaceWorkgroupIDOp>( |
codereview_new_cpp_data_9459 | void addSPIRVWinogradVectorizePassPipeline(OpPassManager &pm) {
nestedModulePM.addPass(createCSEPass());
// Tile to GPU invocations and vectorize.
- nestedModulePM.addNestedPass<func::FuncOp>(
- createSPIRVCreateFastSlowPathPass());
nestedModulePM.addNestedPass<func::FuncOp>(createSPIRVAnnotateLoopsPass());
nestedModulePM.addPass(createCanonicalizerPass());
nestedModulePM.addPass(createCSEPass());
Do you need this pass right now given we don't have `tensor.pad` op? It might be useful if later we change to emit `tensor.pad`; but we can remove it for now?
void addSPIRVWinogradVectorizePassPipeline(OpPassManager &pm) {
nestedModulePM.addPass(createCSEPass());
// Tile to GPU invocations and vectorize.
nestedModulePM.addNestedPass<func::FuncOp>(createSPIRVAnnotateLoopsPass());
nestedModulePM.addPass(createCanonicalizerPass());
nestedModulePM.addPass(createCSEPass()); |
codereview_new_cpp_data_9460 | class SPIRVAnnotateLoopsPass final
void runOnOperation() override {
func::FuncOp funcOp = getOperation();
SmallVector<scf::ForOp, 4> forOps;
- bool afterWorkgroupLoops{false};
- funcOp.walk([&](Operation *op) {
- if (isa<IREE::Flow::DispatchTensorLoadOp>(op)) {
- afterWorkgroupLoops = true;
- }
- if (isa<IREE::Flow::DispatchTensorStoreOp>(op)) {
- afterWorkgroupLoops = false;
- }
- if (afterWorkgroupLoops) {
- if (auto forOp = dyn_cast<scf::ForOp>(op)) forOps.push_back(forOp);
- }
});
MLIRContext *context = &getContext();
OpBuilder builder(context);
const char *attrName = getSPIRVDistributeAttrName();
- // Can only distribute to a maximum of 3 loops
- int maxIndex{2};
for (auto forOp : llvm::enumerate(forOps)) {
- if (forOp.index() > maxIndex) break;
forOp.value()->setAttr(attrName, builder.getIndexAttr(forOp.index()));
}
}
We can use `kNumGPUDims` in `Codegen/Utils/GPUUtils.h`.
class SPIRVAnnotateLoopsPass final
void runOnOperation() override {
func::FuncOp funcOp = getOperation();
SmallVector<scf::ForOp, 4> forOps;
+ funcOp.walk([&](scf::ForOp forOp) {
+ if (!isTiledAndDistributedLoop(forOp)) forOps.push_back(forOp);
});
MLIRContext *context = &getContext();
OpBuilder builder(context);
const char *attrName = getSPIRVDistributeAttrName();
for (auto forOp : llvm::enumerate(forOps)) {
+ if (forOp.index() > kNumGPUDims) break;
forOp.value()->setAttr(attrName, builder.getIndexAttr(forOp.index()));
}
} |
codereview_new_cpp_data_9461 | static LogicalResult setWinogradOpConfig(
spirv::ResourceLimitsAttr limits,
IREE::LinalgExt::WinogradInputTransformOp op) {
// Tiling is already done by tile and decompose, so we only set pipeline and
- // workgroup size
auto pipeline = CodeGenPipeline::SPIRVWinogradVectorize;
std::array<int64_t, 3> workgroupSize = {32, 4, 4};
TileSizesListType tileSizes = {{1, 32}};
Nit: also `.` to end the sentence.
static LogicalResult setWinogradOpConfig(
spirv::ResourceLimitsAttr limits,
IREE::LinalgExt::WinogradInputTransformOp op) {
// Tiling is already done by tile and decompose, so we only set pipeline and
+ // workgroup size. The tile sizes below are placeholders and were obtained
+ // by manual tuning on the AMD Navi2 GPU on a small set of convolution
+ // sizes found in the StableDiffusion model.
auto pipeline = CodeGenPipeline::SPIRVWinogradVectorize;
std::array<int64_t, 3> workgroupSize = {32, 4, 4};
TileSizesListType tileSizes = {{1, 32}}; |
codereview_new_cpp_data_9462 | static LogicalResult setWinogradOpConfig(
spirv::ResourceLimitsAttr limits,
IREE::LinalgExt::WinogradInputTransformOp op) {
// Tiling is already done by tile and decompose, so we only set pipeline and
- // workgroup size
auto pipeline = CodeGenPipeline::SPIRVWinogradVectorize;
std::array<int64_t, 3> workgroupSize = {32, 4, 4};
TileSizesListType tileSizes = {{1, 32}};
Are these numbers just placeholders for now? Would be nice to put some comments to explain why them.
static LogicalResult setWinogradOpConfig(
spirv::ResourceLimitsAttr limits,
IREE::LinalgExt::WinogradInputTransformOp op) {
// Tiling is already done by tile and decompose, so we only set pipeline and
+ // workgroup size. The tile sizes below are placeholders and were obtained
+ // by manual tuning on the AMD Navi2 GPU on a small set of convolution
+ // sizes found in the StableDiffusion model.
auto pipeline = CodeGenPipeline::SPIRVWinogradVectorize;
std::array<int64_t, 3> workgroupSize = {32, 4, 4};
TileSizesListType tileSizes = {{1, 32}}; |
codereview_new_cpp_data_9463 | static void addTileAndDistributePasses(
nestedModulePM.addPass(createCanonicalizerPass());
nestedModulePM.addPass(createCSEPass());
nestedModulePM.addNestedPass<func::FuncOp>(
- IREE::LinalgExt::createTileAndDecomposeWinogradInputTransformPass());
- nestedModulePM.addNestedPass<func::FuncOp>(
- IREE::LinalgExt::createTileAndDecomposeWinogradOutputTransformPass());
}
//===---------------------------------------------------------------------===//
See comment below. I think we can combine this pass and `TileAndDecomposeWinogradInputTransformPass` (and maybe rename it to `TileAndDecomposeWinogradTransformPass`.
static void addTileAndDistributePasses(
nestedModulePM.addPass(createCanonicalizerPass());
nestedModulePM.addPass(createCSEPass());
nestedModulePM.addNestedPass<func::FuncOp>(
+ IREE::LinalgExt::createTileAndDecomposeWinogradTransformPass());
}
//===---------------------------------------------------------------------===// |
codereview_new_cpp_data_9464 | struct SetMatmulEncoding : public OpRewritePattern<linalg::MatmulOp> {
Type rhsElemType = getElemType(origRhs);
Type outElemType = getElemType(origOut);
TensorEncoding lhsEncoding;
TensorEncoding rhsEncoding;
TensorEncoding outEncoding;
to be safe you probably need
```
if (!lhsElemType || !rhsElemType || !outElemType) {
return failure();
}
```
You can imagine why I am chuckling :P
struct SetMatmulEncoding : public OpRewritePattern<linalg::MatmulOp> {
Type rhsElemType = getElemType(origRhs);
Type outElemType = getElemType(origOut);
+ if (!lhsElemType || !rhsElemType || !outElemType) {
+ return failure();
+ }
+
TensorEncoding lhsEncoding;
TensorEncoding rhsEncoding;
TensorEncoding outEncoding; |
codereview_new_cpp_data_9466 | Session::Session(GlobalInit &globalInit) : globalInit(globalInit) {
bindingOptions = *globalInit.clBindingOptions;
inputOptions = *globalInit.clInputOptions;
highLevelOptimizationOptions = *globalInit.clHighLevelOptimizationOptions;
halTargetOptions = *globalInit.clHalTargetOptions;
vmTargetOptions = *globalInit.clVmTargetOptions;
bytecodeTargetOptions = *globalInit.clBytecodeTargetOptions;
needs to be guarded with IREE_HAVE_C_OUTPUT_FORMAT
Session::Session(GlobalInit &globalInit) : globalInit(globalInit) {
bindingOptions = *globalInit.clBindingOptions;
inputOptions = *globalInit.clInputOptions;
highLevelOptimizationOptions = *globalInit.clHighLevelOptimizationOptions;
+ schedulingOptions = *globalInit.clSchedulingOptions;
halTargetOptions = *globalInit.clHalTargetOptions;
vmTargetOptions = *globalInit.clVmTargetOptions;
bytecodeTargetOptions = *globalInit.clBytecodeTargetOptions; |
codereview_new_cpp_data_9467 | static FailureOr<Operation *> getRootOp(func::FuncOp funcOp) {
/// method returns a proper tile sizes vector for each op during tiling.
static SmallVector<Value> buildTileSizesForOp(OpBuilder &b, Operation *op,
SmallVector<int64_t> tileSizes) {
- auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
- assert(linalgOp && "can only compute tile size on linalg ops");
SmallVector<int64_t> newTileSizes = tileSizes;
- newTileSizes.resize(linalgOp.getNumLoops(), /*default=*/0);
OpBuilder::InsertionGuard guard(b);
b.setInsertionPointToStart(
I think you can cast this to `TilingInterface` and assert. You need to only check that the op implements the tiling interface. Also I don't think you need to change the insertion point (but there is no harm in it).
static FailureOr<Operation *> getRootOp(func::FuncOp funcOp) {
/// method returns a proper tile sizes vector for each op during tiling.
static SmallVector<Value> buildTileSizesForOp(OpBuilder &b, Operation *op,
SmallVector<int64_t> tileSizes) {
+ auto tilingOp = cast<TilingInterface>(op);
SmallVector<int64_t> newTileSizes = tileSizes;
+ newTileSizes.resize(tilingOp.getLoopIteratorTypes().size(), /*default=*/0);
OpBuilder::InsertionGuard guard(b);
b.setInsertionPointToStart( |
codereview_new_cpp_data_9468 | struct LinalgStrategyTilePass
filter);
else
tilingPattern.add<LinalgSCFTilingPattern>(ctx, options, filter);
- if (anchorOpName == tensor::PadOp::getOperationName()) {
- linalg::LinalgTilingOptions legacyTilingOptions;
- legacyTilingOptions.setTileSizeComputationFunction(
- options.tileSizeComputationFunction);
- populatePadTensorTilingPatterns(tilingPattern, legacyTilingOptions);
- }
(void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern));
}
actually the upstream tiling handles pad ops as well. So you should be able to drop line 498 - 502.
struct LinalgStrategyTilePass
filter);
else
tilingPattern.add<LinalgSCFTilingPattern>(ctx, options, filter);
+
(void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern));
}
|
codereview_new_cpp_data_9472 | static SmallVector<int64_t> getLinalgExtDefaultWorkgroupTileSizes(
}
}
- OpBuilder builder(op.getContext());
- builder.setInsertionPoint(op);
- SmallVector<Range> iterationDomain = op.getIterationDomain(builder);
- for (int i = 0, e = std::min<int64_t>(numLoops, workgroupTileSizes.size());
- i < e; ++i) {
- Optional<int64_t> cstSize = getConstantIntValue(iterationDomain[i].size);
- if (workgroupTileSizes[i] && cstSize) {
- workgroupTileSizes[i] = std::min(workgroupTileSizes[i], cstSize.value());
- }
- }
-
return workgroupTileSizes;
}
Uggh! I know why we need this, I wish we didn't.... We probably need a `getStaticIterationDomain` method in `TilingInterface`....
Edit: Could we move this part into the `setRootConfig` of the `unpack` op? Then a method on the unpack op that returns the static iteration domain could avoid using the builder.
static SmallVector<int64_t> getLinalgExtDefaultWorkgroupTileSizes(
}
}
return workgroupTileSizes;
}
|
codereview_new_cpp_data_9473 | SmallVector<int64_t> computeInterchangeFromDimPos(ArrayRef<int64_t> dimsPos,
}
Value createValueFrom2DConstant(const float *val, int64_t rows, int64_t cols,
- bool transpose, Location loc,
- PatternRewriter &rewriter) {
- SmallVector<float> vector(rows * cols, 0.0);
- for (int i = 0; i < rows; i++) {
- for (int j = 0; j < cols; j++) {
- if (!transpose) {
- vector[i * cols + j] = val[i * cols + j];
- } else {
- vector[j * rows + i] = val[i * cols + j];
- }
- }
- }
SmallVector<int64_t> shape{rows, cols};
- if (transpose)
- shape = {cols, rows};
return rewriter.create<arith::ConstantOp>(
loc, DenseFPElementsAttr::get(
RankedTensorType::get(shape, rewriter.getF32Type()), vector));
I think you can just create an `ArrayRef` directly from the pointer passed in, and pass the `ArrayRef` into the `::get` method. The copy seems unnecessary.
SmallVector<int64_t> computeInterchangeFromDimPos(ArrayRef<int64_t> dimsPos,
}
Value createValueFrom2DConstant(const float *val, int64_t rows, int64_t cols,
+ Location loc, PatternRewriter &rewriter) {
+ ArrayRef<float> vector(val, rows * cols);
SmallVector<int64_t> shape{rows, cols};
return rewriter.create<arith::ConstantOp>(
loc, DenseFPElementsAttr::get(
RankedTensorType::get(shape, rewriter.getF32Type()), vector)); |
codereview_new_cpp_data_9474 | MaterializeEncodingConversionTarget::MaterializeEncodingConversionTarget(
// Mark any operation that has operands/results with encoding as
// illegal.
markUnknownOpDynamicallyLegal([=](Operation *op) {
- for (auto v : op->getOperands()) {
- if (typeHasEncoding(v.getType()))
- return false;
- }
- for (auto t : op->getResultTypes()) {
auto tensorType = t.dyn_cast<RankedTensorType>();
- if (tensorType && tensorType.getEncoding())
- return false;
- }
- return true;
});
}
Is this correct? Where is the `typeHasEncoding` method defined?
MaterializeEncodingConversionTarget::MaterializeEncodingConversionTarget(
// Mark any operation that has operands/results with encoding as
// illegal.
markUnknownOpDynamicallyLegal([=](Operation *op) {
+ auto typeHasEncoding = [=](Type t) -> bool {
auto tensorType = t.dyn_cast<RankedTensorType>();
+ return tensorType && tensorType.getEncoding();
+ };
+ auto valueHasEncoding = [=](Value v) -> bool {
+ return typeHasEncoding(v.getType());
+ };
+ bool hasOperandOrResultsWithEncoding =
+ llvm::any_of(op->getOperands(), valueHasEncoding) ||
+ llvm::any_of(op->getResultTypes(), typeHasEncoding);
+ return !hasOperandOrResultsWithEncoding;
});
}
|
codereview_new_cpp_data_9475 | static LogicalResult setReductionConfig(const spirv::TargetEnv &targetEnv,
if (bitWidth != 32) return failure();
// Let each thread handle `vectorSize` elements.
- const unsigned largestLoadSizeInBits = 128;
- unsigned vectorSize = largestLoadSizeInBits / bitWidth;
while ((*dimSize / vectorSize) % subgroupSize != 0) vectorSize /= 2;
// TODO: Add reduction tiling to handle larger reductions.
We have a `kMaxVectorNumBits` defined in this file; just use that?
static LogicalResult setReductionConfig(const spirv::TargetEnv &targetEnv,
if (bitWidth != 32) return failure();
// Let each thread handle `vectorSize` elements.
+ unsigned vectorSize = kMaxVectorNumBits / bitWidth;
while ((*dimSize / vectorSize) % subgroupSize != 0) vectorSize /= 2;
// TODO: Add reduction tiling to handle larger reductions. |
codereview_new_cpp_data_9482 | float loss[] = {1.0f};
void print_state() {
fprintf(stdout, "Weights:");
for (iree_host_size_t i = 0; i < IREE_ARRAYSIZE(w); ++i) {
- printf(" %f", w[i]);
}
fprintf(stdout, ", Bias: %f", b[0]);
fprintf(stdout, ", Loss: %f\n", loss[0]);
nit: one more `printf` to change to `fprintf`
```suggestion
fprintf(stdout, " %f", w[i]);
```
float loss[] = {1.0f};
void print_state() {
fprintf(stdout, "Weights:");
for (iree_host_size_t i = 0; i < IREE_ARRAYSIZE(w); ++i) {
+ fprintf(stdout, " %f", w[i]);
}
fprintf(stdout, ", Bias: %f", b[0]);
fprintf(stdout, ", Loss: %f\n", loss[0]); |
codereview_new_cpp_data_9485 | void setTranslationInfo(IREE::HAL::ExecutableExportOp exportOp,
// operations.
// ===----------------------------------------------------------------------===//
-Operation *getLoweringConfigCarryingOp(ArrayRef<Operation *> computeOps) {
for (Operation *op : computeOps) {
if (getLoweringConfig(op)) return op;
}
- return nullptr;
}
IREE::Codegen::LoweringConfigAttr getLoweringConfig(Operation *op) {
return op->getAttrOfType<IREE::Codegen::LoweringConfigAttr>(kConfigAttrName);
}
-IREE::Codegen::LoweringConfigAttr getLoweringConfig(
ArrayRef<Operation *> computeOps) {
- Operation *op = getLoweringConfigCarryingOp(computeOps);
- if (!op) return nullptr;
- return getLoweringConfig(op);
}
SmallVector<int64_t> getTileSizes(Operation *op, unsigned level) {
If possible make this an `inline ` function in the header file.
void setTranslationInfo(IREE::HAL::ExecutableExportOp exportOp,
// operations.
// ===----------------------------------------------------------------------===//
+FailureOr<Operation *> getLoweringConfigCarryingOp(
+ ArrayRef<Operation *> computeOps) {
for (Operation *op : computeOps) {
if (getLoweringConfig(op)) return op;
}
+ return failure();
}
IREE::Codegen::LoweringConfigAttr getLoweringConfig(Operation *op) {
return op->getAttrOfType<IREE::Codegen::LoweringConfigAttr>(kConfigAttrName);
}
+FailureOr<IREE::Codegen::LoweringConfigAttr> getLoweringConfig(
ArrayRef<Operation *> computeOps) {
+ FailureOr<Operation *> op = getLoweringConfigCarryingOp(computeOps);
+ if (failed(op)) return failure();
+ return getLoweringConfig(*op);
}
SmallVector<int64_t> getTileSizes(Operation *op, unsigned level) { |
codereview_new_cpp_data_9486 | void setTranslationInfo(IREE::HAL::ExecutableExportOp exportOp,
// operations.
// ===----------------------------------------------------------------------===//
-Operation *getLoweringConfigCarryingOp(ArrayRef<Operation *> computeOps) {
for (Operation *op : computeOps) {
if (getLoweringConfig(op)) return op;
}
- return nullptr;
}
IREE::Codegen::LoweringConfigAttr getLoweringConfig(Operation *op) {
return op->getAttrOfType<IREE::Codegen::LoweringConfigAttr>(kConfigAttrName);
}
-IREE::Codegen::LoweringConfigAttr getLoweringConfig(
ArrayRef<Operation *> computeOps) {
- Operation *op = getLoweringConfigCarryingOp(computeOps);
- if (!op) return nullptr;
- return getLoweringConfig(op);
}
SmallVector<int64_t> getTileSizes(Operation *op, unsigned level) {
Echo of comment from below. Maybe be better to avoid passing `nullptr`s around
void setTranslationInfo(IREE::HAL::ExecutableExportOp exportOp,
// operations.
// ===----------------------------------------------------------------------===//
+FailureOr<Operation *> getLoweringConfigCarryingOp(
+ ArrayRef<Operation *> computeOps) {
for (Operation *op : computeOps) {
if (getLoweringConfig(op)) return op;
}
+ return failure();
}
IREE::Codegen::LoweringConfigAttr getLoweringConfig(Operation *op) {
return op->getAttrOfType<IREE::Codegen::LoweringConfigAttr>(kConfigAttrName);
}
+FailureOr<IREE::Codegen::LoweringConfigAttr> getLoweringConfig(
ArrayRef<Operation *> computeOps) {
+ FailureOr<Operation *> op = getLoweringConfigCarryingOp(computeOps);
+ if (failed(op)) return failure();
+ return getLoweringConfig(*op);
}
SmallVector<int64_t> getTileSizes(Operation *op, unsigned level) { |
codereview_new_cpp_data_9489 | static iree_status_t print_buffer_view(iree_hal_device_t* device,
if (iree_status_is_ok(status)) {
status = iree_hal_semaphore_create(device, 0ull, &fence_semaphore);
}
- uint64_t wait_value = 0ull;
uint64_t signal_value = 1ull;
if (iree_status_is_ok(status)) {
- iree_hal_semaphore_list_t wait_semaphores = {
- .count = 0,
- .semaphores = NULL,
- .payload_values = &wait_value,
- };
iree_hal_semaphore_list_t signal_semaphores = {
.count = 1,
.semaphores = &fence_semaphore,
.payload_values = &signal_value,
};
- status = iree_hal_device_queue_execute(device, IREE_HAL_QUEUE_AFFINITY_ANY,
- wait_semaphores, signal_semaphores,
- 1, &command_buffer);
}
// TODO(scotttodd): Make this async - pass a wait source to iree_loop_wait_one
if (iree_status_is_ok(status)) {
you can use `iree_hal_semaphore_list_empty()` for this one
static iree_status_t print_buffer_view(iree_hal_device_t* device,
if (iree_status_is_ok(status)) {
status = iree_hal_semaphore_create(device, 0ull, &fence_semaphore);
}
uint64_t signal_value = 1ull;
if (iree_status_is_ok(status)) {
iree_hal_semaphore_list_t signal_semaphores = {
.count = 1,
.semaphores = &fence_semaphore,
.payload_values = &signal_value,
};
+ status = iree_hal_device_queue_execute(
+ device, IREE_HAL_QUEUE_AFFINITY_ANY, iree_hal_semaphore_list_empty(),
+ signal_semaphores, 1, &command_buffer);
}
// TODO(scotttodd): Make this async - pass a wait source to iree_loop_wait_one
if (iree_status_is_ok(status)) { |
codereview_new_cpp_data_9490 | static bool isFusableWithProducer(OpOperand &operand, bool aggressiveFusion) {
Operation *producer = operand.get().getDefiningOp();
Operation *consumer = operand.getOwner();
- // Fuse linalg ops with set encoding op if the operand is an `outs` value.
- if (isa<linalg::LinalgOp>(consumer) &&
- isa<IREE::LinalgExt::SetEncodingOp>(producer)) {
- return cast<DestinationStyleOpInterface>(consumer).isDpsInit(&operand);
- }
-
if (!isa<linalg::LinalgOp>(consumer) || !isa<linalg::LinalgOp>(producer)) {
return false;
}
I could be wrong. Why fuse linalg_ext.set_encoding + linalg? Shouldn't we fuse linalg + linalg_ext.set_encoding?
Should this be
```suggestion
if (isa<linalg::LinalgOp>(producer) &&
isa<IREE::LinalgExt::SetEncodingOp>(consumer)) {
return cast<DestinationStyleOpInterface>(producer).isDpsInit(&operand);
}
```
static bool isFusableWithProducer(OpOperand &operand, bool aggressiveFusion) {
Operation *producer = operand.get().getDefiningOp();
Operation *consumer = operand.getOwner();
if (!isa<linalg::LinalgOp>(consumer) || !isa<linalg::LinalgOp>(producer)) {
return false;
} |
codereview_new_cpp_data_9491 | SmallVector<Operation *>
UnPackOp::getTiledImplementation(OpBuilder &builder,
ArrayRef<OpFoldResult> offsets,
ArrayRef<OpFoldResult> sizes) {
if (!hasTensorSemantics())
return {};
Location loc = getLoc();
auto ctx = builder.getContext();
- // Take the minimum of two integers.
- auto idMap = AffineMap::getMultiDimIdentityMap(2, ctx);
- auto min = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
- return makeComposedFoldedAffineMin(builder, loc, idMap, {v1, v2});
- };
-
AffineExpr dim0, dim1;
bindDims(ctx, dim0, dim1);
auto addMap = AffineMap::get(2, 0, {dim0 + dim1});
Why do we need to be at the tensor level? It is because the op needs an output tensor?
SmallVector<Operation *>
UnPackOp::getTiledImplementation(OpBuilder &builder,
ArrayRef<OpFoldResult> offsets,
ArrayRef<OpFoldResult> sizes) {
+ // TODO(hanchung): Extend it to handle memref version.
+ // Tiling on buffers needs extra buffer because tiled unpack op could produce
+ // more data for incomplete tiles. Tiling on tensors satisfies IREE's needs.
if (!hasTensorSemantics())
return {};
Location loc = getLoc();
auto ctx = builder.getContext();
AffineExpr dim0, dim1;
bindDims(ctx, dim0, dim1);
auto addMap = AffineMap::get(2, 0, {dim0 + dim1}); |
codereview_new_cpp_data_9493 | struct DetachElementwisePattern
if (!linalgOp.hasTensorSemantics()) return failure();
// Nothing to do if the output tensor operand is already a fill op.
- linalg::OpOperandVector outputOperands = linalgOp.hasBufferSemantics()
- ? linalg::OpOperandVector()
- : linalgOp.getOutputOperands();
// Right now all the cases we see have one output. This can be relaxed once
// we see multiple output ops.
if (outputOperands.size() != 1) return failure();
Nit: replace with
```
linalg::OpOperandVector outputOperands;
if (linalgOp.hasBufferSemantics())
outputOperands = linalgOp.getOutputOperands();
```
struct DetachElementwisePattern
if (!linalgOp.hasTensorSemantics()) return failure();
// Nothing to do if the output tensor operand is already a fill op.
+ linalg::OpOperandVector outputOperands;
+ if (!linalgOp.hasBufferSemantics()) {
+ outputOperands = linalgOp.getOutputOperands();
+ }
// Right now all the cases we see have one output. This can be relaxed once
// we see multiple output ops.
if (outputOperands.size() != 1) return failure(); |
codereview_new_cpp_data_9494 | static LogicalResult setContractConfig(func::FuncOp entryPoint,
const int64_t tileX = config.tileSize[0];
const int64_t tileY = config.tileSize[1];
const int64_t tileK = config.tileSize[2];
- const int64_t workgroupSize[] = {config.workgroupSize[0],
- config.workgroupSize[1],
- config.workgroupSize[2]};
return setMatmulConfig(
tileX, tileY, tileK, workgroupSize, softwarePipelineDepthSimt,
IREE::Codegen::DispatchLoweringPassPipeline::LLVMGPUMatmulSimt);
nit: let's use a `std::array<int64_t, 3>`
static LogicalResult setContractConfig(func::FuncOp entryPoint,
const int64_t tileX = config.tileSize[0];
const int64_t tileY = config.tileSize[1];
const int64_t tileK = config.tileSize[2];
+ const std::array<int64_t, 3> workgroupSize{config.workgroupSize[0],
+ config.workgroupSize[1],
+ config.workgroupSize[2]};
return setMatmulConfig(
tileX, tileY, tileK, workgroupSize, softwarePipelineDepthSimt,
IREE::Codegen::DispatchLoweringPassPipeline::LLVMGPUMatmulSimt); |
codereview_new_cpp_data_9495 | static SmallVector<Value> buildTileSizesForOp(OpBuilder &b, Operation *op,
ArrayRef<int64_t> tileSizes) {
auto tilingOp = cast<TilingInterface>(op);
- SmallVector<int64_t> newTileSizes = llvm::to_vector(tileSizes);
newTileSizes.resize(tilingOp.getLoopIteratorTypes().size(), /*default=*/0);
OpBuilder::InsertionGuard guard(b);
[optional] we can use the constructor, so this can also be
```suggestion
SmallVector<int64_t> newTileSizes(tileSizes);
```
static SmallVector<Value> buildTileSizesForOp(OpBuilder &b, Operation *op,
ArrayRef<int64_t> tileSizes) {
auto tilingOp = cast<TilingInterface>(op);
+ SmallVector<int64_t> newTileSizes(tileSizes);
newTileSizes.resize(tilingOp.getLoopIteratorTypes().size(), /*default=*/0);
OpBuilder::InsertionGuard guard(b); |
codereview_new_cpp_data_9497 | static LogicalResult duplicateInitTensorOps(OpBuilder &b,
return success();
}
-static SmallVector<NamedAttribute> PruneAttributeList(
- linalg::GenericOp op, bool useWARForCooperativeMatrixCodegen = false) {
auto opAttributes = op.getAttributeNames();
llvm::StringSet<> elidedAttrs;
elidedAttrs.insert(opAttributes.begin(), opAttributes.end());
The change is not needed. `useWARForCooperativeMatrixCodegen` is not used in the method.
Also it reminds me that we can replace the uses with `linalg::getPrunedAttributeList`. I added the method and cleaned it up for MHLO repo, but not IREE. I can send a PR for it.
static LogicalResult duplicateInitTensorOps(OpBuilder &b,
return success();
}
+static SmallVector<NamedAttribute> PruneAttributeList(linalg::GenericOp op) {
auto opAttributes = op.getAttributeNames();
llvm::StringSet<> elidedAttrs;
elidedAttrs.insert(opAttributes.begin(), opAttributes.end()); |
codereview_new_cpp_data_9498 | static void tileAndDistributeToWorkgroup(
}
static void tileAndBufferize(OpPassManager &pm) {
- tileAndDistributeToWorkgroup(pm, /*useWARForCooperativeMatrixCodegen =*/true);
auto &nestedModulePM = pm.nest<ModuleOp>();
addBufferizePasses(nestedModulePM);
style nit: we don't need braces before and after `=`.
```suggestion
tileAndDistributeToWorkgroup(pm, /*useWARForCooperativeMatrixCodegen=*/true);
```
https://google.github.io/styleguide/cppguide.html#Function_Argument_Comments
static void tileAndDistributeToWorkgroup(
}
static void tileAndBufferize(OpPassManager &pm) {
+ tileAndDistributeToWorkgroup(pm, /*useWARForCooperativeMatrixCodegen=*/true);
auto &nestedModulePM = pm.nest<ModuleOp>();
addBufferizePasses(nestedModulePM); |
codereview_new_cpp_data_9499 |
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#define DEBUG_TYPE "kernel-dispatch"
-#define DBGS() (llvm::dbgs())
-#define KD_DBGS() (DBGS() << '[' << DEBUG_TYPE << "] ")
namespace mlir {
namespace iree_compiler {
The `DBGS` is not used by others. Maybe combine two macro and use `DBGS`?
```suggestion
#define DEBUG_TYPE "kernel-dispatch"
#define DBGS() (llvm::dbgs()<< '[' << DEBUG_TYPE << "] ")
```
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#define DEBUG_TYPE "kernel-dispatch"
+#define KD_DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
namespace mlir {
namespace iree_compiler { |
codereview_new_cpp_data_9500 | PackOp::getTiledImplementation(OpBuilder &builder,
// Take the minimum of two integers.
auto idMap = AffineMap::getMultiDimIdentityMap(2, ctx);
auto min = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
- return builder.createOrFold<AffineMinOp>(
- loc, idMap,
- ValueRange{getValueOrCreateConstantIndexOp(builder, loc, v1),
- getValueOrCreateConstantIndexOp(builder, loc, v2)});
};
// Subtract two integers.
AffineExpr dim0, dim1;
bindDims(ctx, dim0, dim1);
auto subMap = AffineMap::get(2, 0, {dim0 - dim1});
auto sub = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
- return builder.createOrFold<AffineApplyOp>(
- loc, subMap,
- ValueRange{getValueOrCreateConstantIndexOp(builder, loc, v1),
- getValueOrCreateConstantIndexOp(builder, loc, v2)});
};
int64_t inputRank = getInputRank();
I think now you can use `makeComposedFoldedAffineApplyOp` . That probably gives you what you want.
PackOp::getTiledImplementation(OpBuilder &builder,
// Take the minimum of two integers.
auto idMap = AffineMap::getMultiDimIdentityMap(2, ctx);
auto min = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
+ return makeComposedFoldedAffineMin(builder, loc, idMap, {v1, v2});
};
// Subtract two integers.
AffineExpr dim0, dim1;
bindDims(ctx, dim0, dim1);
auto subMap = AffineMap::get(2, 0, {dim0 - dim1});
auto sub = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
+ return makeComposedFoldedAffineApply(builder, loc, subMap, {v1, v2});
};
int64_t inputRank = getInputRank(); |
codereview_new_cpp_data_9501 | LogicalResult detail::verifyGlobalAddressOp(
GlobalAddressOpInterface addressOp, SymbolTableCollection &symbolTable) {
if (!isa_and_nonnull<IREE::Util::GlobalOpInterface>(
symbolTable.lookupNearestSymbolFrom(addressOp.getOperation(),
- addressOp.getGlobalAttr())))
return addressOp->emitOpError(
"attribute 'global' failed to satisfy constraint: flat symbol "
"reference attribute referencing to a 'IREE::Util::GlobalOpInterface' "
"symbol");
auto globalOp =
lookupGlobalOp(addressOp, addressOp.getGlobalAttr(), symbolTable);
if (!globalOp) {
style nit: we use braces around multi-line if statements
LogicalResult detail::verifyGlobalAddressOp(
GlobalAddressOpInterface addressOp, SymbolTableCollection &symbolTable) {
if (!isa_and_nonnull<IREE::Util::GlobalOpInterface>(
symbolTable.lookupNearestSymbolFrom(addressOp.getOperation(),
+ addressOp.getGlobalAttr()))) {
return addressOp->emitOpError(
"attribute 'global' failed to satisfy constraint: flat symbol "
"reference attribute referencing to a 'IREE::Util::GlobalOpInterface' "
"symbol");
+ }
auto globalOp =
lookupGlobalOp(addressOp, addressOp.getGlobalAttr(), symbolTable);
if (!globalOp) { |
codereview_new_cpp_data_9503 | static LogicalResult setWarpReductionConfig(func::FuncOp entryPoint,
return success();
}
static LogicalResult setTransposeConfig(func::FuncOp entryPoint,
linalg::LinalgOp linalgOp) {
LinalgOpInfo opInfo(linalgOp, sharedMemTransposeFilter);
// Checks preconditions for shared mem transpose.
if (!opInfo.isTranspose() || opInfo.isDynamic() || opInfo.isReduction() ||
- !opInfo.isTwoThreeLoops()) {
return failure();
}
yeah, I would use linalgOp for the simple checks here and move the `isTwoThreeLoops` check to this file since it's a specific check.
static LogicalResult setWarpReductionConfig(func::FuncOp entryPoint,
return success();
}
+static bool hasTwoOrThreeLoopsInfo(linalg::LinalgOp linalgOp) {
+ return linalgOp.getNumParallelLoops() >= 2 &&
+ linalgOp.getNumParallelLoops() <= 3;
+}
+
static LogicalResult setTransposeConfig(func::FuncOp entryPoint,
linalg::LinalgOp linalgOp) {
LinalgOpInfo opInfo(linalgOp, sharedMemTransposeFilter);
// Checks preconditions for shared mem transpose.
if (!opInfo.isTranspose() || opInfo.isDynamic() || opInfo.isReduction() ||
+ !isa<linalg::GenericOp>(linalgOp) || !hasTwoOrThreeLoopsInfo(linalgOp)) {
return failure();
}
|
codereview_new_cpp_data_9504 | void addGPUMatmulTensorCorePassPipeline(OpPassManager &pm,
}
void addGPUTransposePassPipeline(OpPassManager &pm) {
- // tileAndBufferize(pm);
tileAndDistributeToWorkgroup(pm);
-
auto &nestedModulePM = pm.nest<ModuleOp>();
- // Distribute linalg onto threads within the workgroup.
- // nestedModulePM.addNestedPass<func::FuncOp>(createLLVMGPUTileAndDistribute(
- // false, GPUPromoteSharedMemPattern::TransposeOpPattern));
nestedModulePM.addNestedPass<func::FuncOp>(
createRemoveSingleIterationLoopPass());
The pipeline looks good, you can remove this commented out lines
void addGPUMatmulTensorCorePassPipeline(OpPassManager &pm,
}
void addGPUTransposePassPipeline(OpPassManager &pm) {
tileAndDistributeToWorkgroup(pm);
auto &nestedModulePM = pm.nest<ModuleOp>();
nestedModulePM.addNestedPass<func::FuncOp>(
createRemoveSingleIterationLoopPass()); |
codereview_new_cpp_data_9505 | static LogicalResult setSPIRVOpConfig(const spirv::TargetEnv &targetEnv,
Operation *rootOp) {
if (IREE::Codegen::CompilationInfoAttr compilationInfo =
getCompilationInfo(rootOp)) {
- // If the op already has a lowering config coming from the IR use this and
- // bypass the heuristic.
return setUserConfig(entryPointFn, rootOp, compilationInfo);
}
... has a lowering configuration specified from the original source by the user, then use it directly.
static LogicalResult setSPIRVOpConfig(const spirv::TargetEnv &targetEnv,
Operation *rootOp) {
if (IREE::Codegen::CompilationInfoAttr compilationInfo =
getCompilationInfo(rootOp)) {
+ // If the op already has a lowering configuration specified from the
+ // original source by the user, then use it directly.
return setUserConfig(entryPointFn, rootOp, compilationInfo);
}
|
codereview_new_cpp_data_9506 | int main(int argc, char **argv) {
mlir::writeCodeToFile(module, outputFile->os()) outputFile->keep();
return success();
}
- llvm::errs() << "Unkonwn output format" << outputFormat << "\n";
return failure();
};
```suggestion
llvm::errs() << "Unknown output format" << outputFormat << "\n";
```
int main(int argc, char **argv) {
mlir::writeCodeToFile(module, outputFile->os()) outputFile->keep();
return success();
}
+ llvm::errs() << "Unknown output format" << outputFormat << "\n";
return failure();
};
|
codereview_new_cpp_data_9507 | int main(int argc, char **argv) {
mlir::writeCodeToFile(module, outputFile->os()) outputFile->keep();
return success();
}
- llvm::errs() << "Unkonwn output format" << outputFormat << "\n";
return failure();
};
```suggestion
llvm::errs() << "Unknown output format" << outputFormat << "\n";
```
int main(int argc, char **argv) {
mlir::writeCodeToFile(module, outputFile->os()) outputFile->keep();
return success();
}
+ llvm::errs() << "Unknown output format" << outputFormat << "\n";
return failure();
};
|
codereview_new_cpp_data_9508 | int main(int argc, char **argv) {
mlir::writeCodeToFile(module, outputFile->os()) outputFile->keep();
return success();
}
- llvm::errs() << "Unkonwn output format" << outputFormat << "\n";
return failure();
};
```suggestion
llvm::errs() << "Unknown output format" << outputFormat << "\n";
```
int main(int argc, char **argv) {
mlir::writeCodeToFile(module, outputFile->os()) outputFile->keep();
return success();
}
+ llvm::errs() << "Unknown output format" << outputFormat << "\n";
return failure();
};
|
codereview_new_cpp_data_9510 | struct ScatterOpImplicitBatch : public OpRewritePattern<mhlo::ScatterOp> {
auto indices = op.scatter_indices();
auto indicesTy = indices.getType().cast<ShapedType>();
- // Check whether indices
if (!indicesTy.hasRank()) return failure();
if (indicesTy.getRank() != 1 && indexVectorDim != 0) {
return rewriter.notifyMatchFailure(op,
I'm guessing this was an oversight?
struct ScatterOpImplicitBatch : public OpRewritePattern<mhlo::ScatterOp> {
auto indices = op.scatter_indices();
auto indicesTy = indices.getType().cast<ShapedType>();
+ // Check whether indices has no batch dimension.
if (!indicesTy.hasRank()) return failure();
if (indicesTy.getRank() != 1 && indexVectorDim != 0) {
return rewriter.notifyMatchFailure(op, |
codereview_new_cpp_data_9512 | spirv::DeviceType getDeviceType(const TargetTriple &triple) {
/// Returns the Vulkan version for the given target `triple`.
Vulkan::Version getVersion(const TargetTriple &triple) {
- // Android 11/12 stays at Vulkan 1.1.
if (triple.getOS() == TargetTripleOS::Android30 ||
triple.getOS() == TargetTripleOS::Android31) {
return Version::V_1_1;
Comment says 11/12 but code says 30/31. Maybe clarify OS version vs API version?
spirv::DeviceType getDeviceType(const TargetTriple &triple) {
/// Returns the Vulkan version for the given target `triple`.
Vulkan::Version getVersion(const TargetTriple &triple) {
+ // Android 11/12 (API level 30/31) stays at Vulkan 1.1.
if (triple.getOS() == TargetTripleOS::Android30 ||
triple.getOS() == TargetTripleOS::Android31) {
return Version::V_1_1; |
codereview_new_cpp_data_9513 | LogicalResult setAMDCodeGenConfig(const spirv::TargetEnv &targetEnv,
int subgroupSize = targetEnv.getResourceLimits().getSubgroupSize();
if (auto linalgOp = dyn_cast<linalg::LinalgOp>(rootOp)) {
- if (linalg::isaContractionOpInterface(linalgOp) &&
- llvm::is_contained({2u, 3u}, linalgOp.getNumParallelLoops())) {
return setAMDMatmulConfig(linalgOp, subgroupSize);
- }
}
return TypeSwitch<Operation *, LogicalResult>(rootOp)
nit: I wonder if it is worth making this check a helper? It might not be practical, so just a suggestion.
LogicalResult setAMDCodeGenConfig(const spirv::TargetEnv &targetEnv,
int subgroupSize = targetEnv.getResourceLimits().getSubgroupSize();
if (auto linalgOp = dyn_cast<linalg::LinalgOp>(rootOp)) {
+ if (isMatmulOrBatchMatmul(linalgOp))
return setAMDMatmulConfig(linalgOp, subgroupSize);
}
return TypeSwitch<Operation *, LogicalResult>(rootOp) |
codereview_new_cpp_data_9514 | static bool isTransposeOp(linalg::LinalgOp linalgOp) {
return false;
}
- // Only transpose static sizes
- if (inputShape[0] == ShapedType::kDynamicSize ||
- inputShape[1] == ShapedType::kDynamicSize ||
- outputShape[0] == ShapedType::kDynamicSize ||
- outputShape[1] == ShapedType::kDynamicSize) {
return false;
}
linalg has a `hasDynamicShape` member
static bool isTransposeOp(linalg::LinalgOp linalgOp) {
return false;
}
+ // Only transpose static shapes
+ if (linalgOp.hasDynamicShape()) {
return false;
}
|
codereview_new_cpp_data_9997 |
// SPDX - License - Identifier: GPL - 3.0 +
#include "MantidQtWidgets/Common/QtJobRunner.h"
-#include "MantidAPI/AlgorithmRuntimeProps.h"
-#include "MantidAPI/IAlgorithm.h"
#include "MantidQtWidgets/Common/BatchAlgorithmRunner.h"
#include "MantidQtWidgets/Common/ConfiguredAlgorithm.h"
#include "MantidQtWidgets/Common/IConfiguredAlgorithm.h"
Are `AlgorithmRuntimeProps.h` and `IAlgorithm.h` needed as imports if only usage for `IConfiguredAlgorithm_sptr ` has been added?
// SPDX - License - Identifier: GPL - 3.0 +
#include "MantidQtWidgets/Common/QtJobRunner.h"
#include "MantidQtWidgets/Common/BatchAlgorithmRunner.h"
#include "MantidQtWidgets/Common/ConfiguredAlgorithm.h"
#include "MantidQtWidgets/Common/IConfiguredAlgorithm.h" |
codereview_new_cpp_data_9999 | TimeSplitter::TimeSplitter(const Mantid::API::MatrixWorkspace_sptr &ws) {
const auto X = ws->binEdges(0);
const auto &Y = ws->y(0);
if (X.size() != Y.size() + 1) {
throw std::runtime_error(
"Size of x values must be one more than size of y values to construct TimeSplitter from MatrixWorkspace.");
according to the docstring, there should be a validation here that all values in `X` should be equal or greater than zero, otherwise throw a `RuntimeError`
TimeSplitter::TimeSplitter(const Mantid::API::MatrixWorkspace_sptr &ws) {
const auto X = ws->binEdges(0);
const auto &Y = ws->y(0);
+ if (std::any_of(X.begin(), X.end(), [](int i) { return i < 0; })) {
+ throw std::runtime_error("All X values in MatrixWorkspace must be >= 0 to construct TimeSplitter.");
+ }
if (X.size() != Y.size() + 1) {
throw std::runtime_error(
"Size of x values must be one more than size of y values to construct TimeSplitter from MatrixWorkspace."); |
codereview_new_cpp_data_10002 | static int start_process( struct vine_process *p, struct link *manager )
list_push_tail(coprocess_list, p->coprocess);
hash_table_insert(features, duty_name, (void **) 1);
send_features(manager);
- send_message(manager, "duty-update %d %s\n", p->task->task_id, "STARTED");
send_resource_update(manager);
}
}
Let's make this messages:
```C
send_message(manager, "info duty-update %d %s\n", p->task->task_id, "started");
```
The reason is that everything works fine if for something the message is not sent (e.g., different worker protocol). Also, since these messages appear in the debug log, lets use lowercase for "started", so it doesn't look out of place.
static int start_process( struct vine_process *p, struct link *manager )
list_push_tail(coprocess_list, p->coprocess);
hash_table_insert(features, duty_name, (void **) 1);
send_features(manager);
+ send_message(manager, "info duty-update %d %s\n", p->task->task_id, "started");
send_resource_update(manager);
}
} |
codereview_new_cpp_data_10003 | static vine_msg_code_t vine_manager_recv_no_retry(struct vine_manager *q, struct
result = handle_cache_update(q, w, line);
} else if (string_prefix_is(line, "cache-invalid")) {
result = handle_cache_invalid(q, w, line);
- }
-// else if (string_prefix_is(line, "worker-init")) {
-// result = handle_worker_init(q, w, line);
-// }
- else if (string_prefix_is(line, "transfer-address")) {
result = handle_transfer_address(q, w, line);
} else if( sscanf(line,"GET %s HTTP/%*d.%*d",path)==1) {
result = handle_http_request(q,w,path,stoptime);
This seems incomplete. Did you mean to finish this, or is it meant to be part of a future PR?
static vine_msg_code_t vine_manager_recv_no_retry(struct vine_manager *q, struct
result = handle_cache_update(q, w, line);
} else if (string_prefix_is(line, "cache-invalid")) {
result = handle_cache_invalid(q, w, line);
+ } else if (string_prefix_is(line, "transfer-address")) {
result = handle_transfer_address(q, w, line);
} else if( sscanf(line,"GET %s HTTP/%*d.%*d",path)==1) {
result = handle_http_request(q,w,path,stoptime); |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.