Index: gcc/ChangeLog
===================================================================
--- gcc/ChangeLog	(revision 187465)
+++ gcc/ChangeLog	(revision 187466)
@@ -1,3 +1,10 @@
+2012-05-14  Richard Guenther
+
+	PR tree-optimization/53331
+	* tree-vect-data-refs.c (vect_verify_datarefs_alignment): Ignore
+	strided loads.
+	* tree-vect-stmts.c (vect_model_load_cost): Handle strided loads.
+
 2012-05-14  Manuel López-Ibáñez
 
 	PR 53063
Index: gcc/tree-vect-data-refs.c
===================================================================
--- gcc/tree-vect-data-refs.c	(revision 187465)
+++ gcc/tree-vect-data-refs.c	(revision 187466)
@@ -1078,6 +1078,11 @@ vect_verify_datarefs_alignment (loop_vec
 	  || !STMT_VINFO_VECTORIZABLE (stmt_info))
 	continue;
 
+      /* Strided loads perform only component accesses, alignment is
+	 irrelevant for them.  */
+      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+	continue;
+
       supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
       if (!supportable_dr_alignment)
         {
Index: gcc/tree-vect-stmts.c
===================================================================
--- gcc/tree-vect-stmts.c	(revision 187465)
+++ gcc/tree-vect-stmts.c	(revision 187466)
@@ -1032,10 +1032,19 @@ vect_model_load_cost (stmt_vec_info stmt
     }
 
   /* The loads themselves.  */
-  vect_get_load_cost (first_dr, ncopies,
-		      ((!STMT_VINFO_GROUPED_ACCESS (stmt_info)) || group_size > 1
-		       || slp_node),
-		      &inside_cost, &outside_cost);
+  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
+    {
+      /* N scalar loads plus gathering them into a vector.
+	 ??? scalar_to_vec isn't the cost for that.  */
+      inside_cost += (vect_get_stmt_cost (scalar_load) * ncopies
+		      * TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)));
+      inside_cost += ncopies * vect_get_stmt_cost (scalar_to_vec);
+    }
+  else
+    vect_get_load_cost (first_dr, ncopies,
+			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
+			 || group_size > 1 || slp_node),
+			&inside_cost, &outside_cost);
 
   if (vect_print_dump_info (REPORT_COST))
     fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
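
Editor's note, not part of the patch: a minimal sketch of the kind of loop
that takes the strided-load path.  The function name and signature are
hypothetical; PR 53331's actual testcase is not reproduced here.

/* A load whose stride is loop-invariant but not a compile-time constant,
   so the vectorizer marks it STMT_VINFO_STRIDE_LOAD_P.  Each vector of
   results is assembled from individual scalar element loads, which is why
   vector alignment of A is irrelevant (the tree-vect-data-refs.c hunk) and
   why the cost is modeled as TYPE_VECTOR_SUBPARTS scalar loads plus a
   vector construction (the tree-vect-stmts.c hunk).  */
void
foo (float *restrict out, const float *restrict a, int stride, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = a[i * stride] * 2.0f;
}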
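
Also illustrative only: the shape of the new cost computation in
vect_model_load_cost, with made-up unit costs standing in for
vect_get_stmt_cost; the real values come from target-dependent cost hooks.

#include <stdio.h>

int
main (void)
{
  /* Assumed placeholder values, not real target costs.  */
  int scalar_load_cost = 1;	/* stands in for vect_get_stmt_cost (scalar_load) */
  int scalar_to_vec_cost = 1;	/* stands in for vect_get_stmt_cost (scalar_to_vec) */
  int ncopies = 1;		/* copies of the vector statement */
  int subparts = 4;		/* TYPE_VECTOR_SUBPARTS, e.g. 4 for V4SF */

  /* Mirrors the new strided-load branch: N scalar element loads per
     copy, plus one vector construction per copy (approximated by
     scalar_to_vec, as the ??? comment in the patch concedes).  */
  int inside_cost = scalar_load_cost * ncopies * subparts
		    + ncopies * scalar_to_vec_cost;

  printf ("strided-load inside_cost contribution: %d\n", inside_cost);
  return 0;
}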